source
stringlengths
3
92
c
stringlengths
26
2.25M
omp_taskyield.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include <math.h> #include "omp_testsuite.h" #include "omp_my_sleep.h" int test_omp_taskyield() { int i; int count = 0; int start_tid[NUM_TASKS]; int current_tid[NUM_TASKS]; for (i=0; i< NUM_TASKS; i++) { start_tid[i]=0; current_tid[i]=0; } #pragma omp parallel { #pragma omp single { for (i = 0; i < NUM_TASKS; i++) { int myi = i; #pragma omp task untied { my_sleep(SLEEPTIME); start_tid[myi] = omp_get_thread_num(); #pragma omp taskyield if((start_tid[myi] %2) ==0){ my_sleep(SLEEPTIME); current_tid[myi] = omp_get_thread_num(); } /*end of if*/ } /* end of omp task */ } /* end of for */ } /* end of single */ } /* end of parallel */ for (i=0;i<NUM_TASKS; i++) { //printf("start_tid[%d]=%d, current_tid[%d]=%d\n", //i, start_tid[i], i , current_tid[i]); if (current_tid[i] == start_tid[i]) count++; } return (count<NUM_TASKS); } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_taskyield()) { num_failed++; } } return num_failed; }
recolorCube.c
/* * Copyright 2014 NeuroData (http://neurodata.io) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * Recolor Slice Function * Naive implementation */ #include<stdint.h> #include<omp.h> #include<ndlib.h> /*OpenMP implementation for 32-bit annotations*/ void recolorCubeOMP32 ( uint32_t * cutout, int xdim, int ydim, uint32_t * imagemap, uint32_t * rgbColor) { int i,j; #pragma omp parallel num_threads( omp_get_max_threads() ) { #pragma omp for private(i,j) schedule(dynamic) for ( i=0; i<xdim; i++) for ( j=0; j<ydim; j++) if ( cutout [(i*ydim)+j] != 0 ) imagemap [(i*ydim)+j] = rgbColor[ cutout [(i*ydim)+j] % 217 ]; } } /*OpenMP implementation for 64-bit annotations*/ void recolorCubeOMP64 ( uint64_t * cutout, int xdim, int ydim, uint64_t * imagemap, uint64_t * rgbColor) { int i,j; #pragma omp parallel num_threads( omp_get_max_threads() ) { #pragma omp for private(i,j) schedule(dynamic) for ( i=0; i<xdim; i++) for ( j=0; j<ydim; j++) if ( cutout [(i*ydim)+j] != 0 ) imagemap [(i*ydim)+j] = rgbColor[ cutout [(i*ydim)+j] % 217 ]; } }
securezip_fmt_plug.c
/*
 * JtR format to crack PKWARE's SecureZIP archives. The same archive format is
 * used by "Directory Opus" software.
 *
 * See "APPNOTE-6.3.4.TXT" for more information about SecureZIP.
 *
 * This software is Copyright (c) 2017, Dhiru Kholia <kholia at kth.se> and
 * it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * Big thanks goes to PKWARE for documenting the archive format, and 7-Zip
 * project for implementing the specification.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_securezip;
#elif FMT_REGISTERS_H
john_register_one(&fmt_securezip);
#else

#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif

#include "arch.h"
#include "misc.h"
#include "sha.h"
#include "aes.h"
#include "jumbo.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "securezip_common.h"
#include "memdbg.h"

#define FORMAT_LABEL "securezip"
#define FORMAT_NAME "PKWARE SecureZIP"
#define ALGORITHM_NAME "SHA1 AES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1

#ifndef SHA1_SIZE
#define SHA1_SIZE 20
#endif

#if defined (_OPENMP)
static int omp_t = 1;
#endif
/* One candidate password per slot, NUL-terminated. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* any_cracked: fast "anything hit this salt?" flag; cracked[]: per-index hit. */
static int any_cracked, *cracked;
static size_t cracked_size;
static struct custom_salt *cur_salt;

/* Scale the key buffers by thread count (times OMP_SCALE for max) and
 * allocate the shared key/result arrays. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	any_cracked = 0;
	cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt;
	cracked = mem_calloc(cracked_size, 1);
}

/* Release buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* Store candidate password for a given slot (strnzcpy NUL-terminates). */
static void securezip_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}

static char *get_key(int index)
{
	return saved_key[index];
}

// The KDF is not quite HMAC-SHA1: it uses the HMAC inner/outer pads
// (0x36 / 0x5c) on SHA1(password) to derive a double-width key, then
// AES-CBC-decrypts the ERD and checks for PKCS#7-style 0x10 padding.
static int securezip_decrypt(struct custom_salt *cur_salt, char *password)
{
	unsigned char digest[SHA1_SIZE];
	unsigned char key[SHA1_SIZE * 2];
	unsigned char buf[64];
	unsigned char ivec[16];
	unsigned char out[ERDLEN]; /* ERDLEN from securezip_common.h; assumed >= erd_length — not visible here */
	SHA_CTX ctx;
	unsigned int i;
	AES_KEY aes_decrypt_key;

	// 1: digest = SHA1(password)
	SHA1_Init(&ctx);
	SHA1_Update(&ctx, password, strlen(password));
	SHA1_Final(digest, &ctx);

	// 2: first key half = SHA1(digest XOR ipad)
	memset(buf, 0x36, 64);
	for (i = 0; i < SHA1_SIZE; i++)
		buf[i] ^= digest[i];
	SHA1_Init(&ctx);
	SHA1_Update(&ctx, buf, 64);
	SHA1_Final(key, &ctx);

	// 3: second key half = SHA1(digest XOR opad)
	memset(buf, 0x5c, 64);
	for (i = 0; i < SHA1_SIZE; i++)
		buf[i] ^= digest[i];
	SHA1_Init(&ctx);
	SHA1_Update(&ctx, buf, 64);
	SHA1_Final(key + SHA1_SIZE, &ctx);

	// Decrypt ERD (ivec is copied because AES_cbc_encrypt mutates it)
	AES_set_decrypt_key(key, cur_salt->bit_length, &aes_decrypt_key);
	memcpy(ivec, cur_salt->iv, 16);
	AES_cbc_encrypt(cur_salt->erd, out, cur_salt->erd_length, &aes_decrypt_key, ivec, AES_DECRYPT);

	// Check padding, 8 bytes out of 16 should be enough.
	return memcmp(out + cur_salt->erd_length - 16, "\x10\x10\x10\x10\x10\x10\x10\x10", 8) == 0;
}

/* Try every queued candidate against the current salt; parallel over keys. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

	if (any_cracked) {
		/* lazy reset from the previous salt's hits */
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		if (securezip_decrypt(cur_salt, saved_key[index])) {
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	}

	return count;
}

static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

/* Padding check in securezip_decrypt is the full test; nothing more to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Format descriptor: first brace group is fmt_params, second is fmt_methods. */
struct fmt_main fmt_securezip = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		securezip_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		securezip_common_valid,
		fmt_default_split,
		fmt_default_binary,
		securezip_common_get_salt,
		{ NULL },
		fmt_default_source,
		{ fmt_default_binary_hash },
		fmt_default_salt_hash,
		NULL,
		set_salt,
		securezip_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{ fmt_default_get_hash },
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
3d25pt_var.lbpar.c
#include <omp.h>
#include <math.h>
/* Ceiling/floor of integer division, computed in double (CLooG helpers). */
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive.
   */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  /* NOTE(review): Nx/Ny/Nz (and Nt) are read below even when argc is too
   * small to set them — the harness presumably always passes 4 args;
   * confirm before reusing this driver. The +8 adds a 4-cell halo per side. */
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  // A holds two time planes (ping-pong buffers) of Nz x Ny x Nx cells.
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // 13 coefficient fields: center + 4 offsets along each of the 3 axes.
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 32;
  tile_size[3] = 512;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  // srand(42);
  /* NOTE(review): index 0 of each dimension is never initialized here but can
   * be read by the stencil's -4 offsets — verify against the generator. */
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2

/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
/* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation.  It must
   not itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header.  GCC knows the name of this
   header in order to preinclude it.  */
/* glibc's intent is to support the IEC 559 math functionality, real
   and complex.  If the GCC (4.9 and later) predefined macros
   specifying compiler intent are available, use them to determine
   whether the overall intent is to support these features; otherwise,
   presume an older compiler has intent to support these features and
   define these macros by default.  */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
   Unicode 6.0.  */
/* We do not support C11 <threads.h>.  */

    /* CLooG-generated iterators: t1 = time wavefront, t2..t4 = tile
     * coordinates (z,y,x), t5 = time point, t6..t8 = intra-tile z,y,x. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
/* Start of CLooG code */
/* NOTE: machine-generated time-tiled loop nest (tile sizes 4x4x32x512);
 * bounds must not be hand-edited. Parallelism is over z-tiles (t2). */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
  for (t1=-1;t1<=2*Nt-2;t1++) {
    lbp=ceild(t1+2,2);
    ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(ceild(t1-12,16),ceild(4*t2-Nz-19,32));t3<=min(min(floord(4*Nt+Ny-9,32),floord(2*t1+Ny-3,32)),floord(4*t2+Ny-9,32));t3++) {
        for (t4=max(max(ceild(t1-252,256),ceild(4*t2-Nz-499,512)),ceild(32*t3-Ny-499,512));t4<=min(min(min(floord(4*Nt+Nx-9,512),floord(2*t1+Nx-3,512)),floord(4*t2+Nx-9,512)),floord(32*t3+Nx+19,512));t4++) {
          for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(512*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
            for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
              for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) {
                lbv=max(512*t4,4*t5+4);
                ubv=min(512*t4+511,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  /* 25-point update at (z,y,x) = (-4*t5+t6, -4*t5+t7, -4*t5+t8):
                   * center term plus coef[1..12] pairs at offsets 1..4 along
                   * z, y, x respectively, written into the other time plane. */
                  A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] =
                    (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)])
                    + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)])))
                    + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)])))
                    + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1])))
                    + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)])))
                    + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)])))
                    + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2])))
                    + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)])))
                    + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)])))
                    + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3])))
                    + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)])))
                    + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)])))
                    + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
prime0.c
#include <stdio.h>

#define LEFT 30000000
#define RIGHT 30000200

/* Trial-division primality test.
 *
 * Fixes two defects in the original inline loop `for (j=2; j<i/2; ++j)`:
 *   - correctness: for n == 4 the loop body never ran (4/2 == 2), so 4 was
 *     reported as prime; testing divisors up to sqrt(n) (j*j <= n) covers
 *     every composite, including perfect squares;
 *   - performance: O(sqrt(n)) instead of O(n) per candidate.
 * Returns 1 if n is prime, 0 otherwise (n < 2 is not prime). */
static int is_prime(int n)
{
  int j;
  if (n < 2)
    return 0;
  for (j = 2; (long)j * j <= n; ++j) {
    if (n % j == 0)
      return 0;
  }
  return 1;
}

int main()
{
  int i;

  /* Each iteration is independent; `i` is private as the loop variable.
   * Output order across threads is unspecified (same as the original). */
#pragma omp parallel for
  for (i = LEFT; i <= RIGHT; ++i) {
    if (is_prime(i)) {
      printf("%d is a prime\n", i);
    }
  }
  return 0;
}
AwFmParallelSearch.c
#include "AwFmIndex.h"
#include "AwFmIndexStruct.h"
#include "AwFmParallelSearch.h"
#include "AwFmSearch.h"
#include "AwFmKmerTable.h"
#include "AwFmFile.h"
#include "AwFmLetter.h"
#include "AwFmSuffixArray.h"
#include <stdint.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>

/* NOTE(review): the loops below use AW_FM_NUM_CONCURRENT_QUERIES (presumably
 * from a header); this local NUM_CONCURRENT_QUERIES appears unused — confirm. */
#define NUM_CONCURRENT_QUERIES 32
#define DEFAULT_POSITION_LIST_CAPACITY 4

/* Phase 1: seed an initial BWT range for each kmer in the block. */
void parallelSearchFindKmerSeedsForBlock(const struct AwFmIndex *restrict const index,
  struct AwFmKmerSearchList *restrict const searchList, struct AwFmSearchRange *restrict const ranges,
  const size_t threadBlockStartIndex, const size_t threadBlockEndIndex);

/* Phase 2: extend every seeded range backward until each kmer is consumed. */
void parallelSearchExtendKmersInBlock(const struct AwFmIndex *restrict const index,
  struct AwFmKmerSearchList *restrict const searchList, struct AwFmSearchRange *restrict const ranges,
  const size_t threadBlockStartIndex, const size_t threadBlockEndIndex);

/* Phase 3: convert each final range into database positions via backtrace. */
void parallelSearchTracebackPositionLists(const struct AwFmIndex *restrict const index,
  struct AwFmKmerSearchList *restrict const searchList, struct AwFmSearchRange *restrict const ranges,
  const size_t threadBlockStartIndex, const size_t threadBlockEndIndex);

/* Grows a kmer's position list to hold `count` entries; returns false on OOM. */
bool setPositionListCount(struct AwFmKmerSearchData *restrict const searchData, uint32_t count);

/* Allocates a search list for `capacity` kmers, each with a small default
 * position-list buffer. Returns NULL (fully cleaned up) on any failure. */
struct AwFmKmerSearchList *awFmCreateKmerSearchList(const size_t capacity){
  // struct AwFmKmerSearchList *searchList = aligned_alloc(AW_FM_CACHE_LINE_SIZE_IN_BYTES,
  //   sizeof(struct AwFmKmerSearchList));
  struct AwFmKmerSearchList *searchList = malloc(sizeof(struct AwFmKmerSearchList));
  if(searchList == NULL){
    return NULL;
  }
  searchList->capacity = capacity;
  searchList->count = 0;

  // searchList->kmerSearchData = aligned_alloc(AW_FM_CACHE_LINE_SIZE_IN_BYTES,
  //   capacity * sizeof(struct AwFmKmerSearchData));
  searchList->kmerSearchData = malloc(capacity * sizeof(struct AwFmKmerSearchData));
  if(searchList->kmerSearchData == NULL){
    free(searchList);
    return NULL;
  }

  bool positionListAllocationFailed = false;
  for(size_t i = 0; i < capacity; i++){
    searchList->kmerSearchData[i].kmerString = NULL;
    searchList->kmerSearchData[i].kmerLength = 0;
    searchList->kmerSearchData[i].capacity = DEFAULT_POSITION_LIST_CAPACITY;
    searchList->kmerSearchData[i].count = 0;
    searchList->kmerSearchData[i].positionList = malloc(DEFAULT_POSITION_LIST_CAPACITY * sizeof(uint64_t));

    //check for an allocation failure
    positionListAllocationFailed |= (searchList->kmerSearchData[i].positionList == NULL);
  }

  //if any of the allocations failed, dealloc everything and return NULL.
  if(positionListAllocationFailed){
    /* free(NULL) is a no-op, so unfilled slots are safe to free here */
    for(size_t i = 0; i < capacity; i++){
      free(searchList->kmerSearchData[i].positionList);
    }
    free(searchList->kmerSearchData);
    free(searchList);
    return NULL;
  }

  return searchList;
}

/* Frees a search list created by awFmCreateKmerSearchList (but not the
 * caller-owned kmer strings it points at). */
void awFmDeallocKmerSearchList(struct AwFmKmerSearchList *restrict const searchList){
  for(size_t i = 0; i < searchList->capacity; i++){
    free(searchList->kmerSearchData[i].positionList);
  }
  free(searchList->kmerSearchData);
  free(searchList);
}

/* Locates every kmer in the list: seeds, extends, and tracebacks in blocks of
 * AW_FM_NUM_CONCURRENT_QUERIES queries, optionally spread over numThreads. */
void awFmParallelSearchLocate(const struct AwFmIndex *restrict const index,
  struct AwFmKmerSearchList *restrict const searchList, uint8_t numThreads){
  const uint32_t searchListCount = searchList->count;
  if(numThreads > 1){
    #pragma omp parallel for num_threads(numThreads)
    for(size_t threadBlockStartIndex = 0; threadBlockStartIndex < searchListCount;
      threadBlockStartIndex += AW_FM_NUM_CONCURRENT_QUERIES){
      /* clamp the last block to the end of the list */
      const size_t threadBlockEndIndex = threadBlockStartIndex + AW_FM_NUM_CONCURRENT_QUERIES > searchList->count?
        searchList->count: threadBlockStartIndex + AW_FM_NUM_CONCURRENT_QUERIES;
      struct AwFmSearchRange ranges[AW_FM_NUM_CONCURRENT_QUERIES];

      parallelSearchFindKmerSeedsForBlock(index, searchList, ranges, threadBlockStartIndex, threadBlockEndIndex);
      parallelSearchExtendKmersInBlock(index, searchList, ranges, threadBlockStartIndex, threadBlockEndIndex);
      parallelSearchTracebackPositionLists(index, searchList, ranges, threadBlockStartIndex, threadBlockEndIndex);
    }
  }
  else{
    //exact duplicate of above code, without the omp pragma, so it doesn't kill performance with only 1 thread.
    for(size_t threadBlockStartIndex = 0; threadBlockStartIndex < searchListCount;
      threadBlockStartIndex += AW_FM_NUM_CONCURRENT_QUERIES){
      const size_t threadBlockEndIndex = threadBlockStartIndex + AW_FM_NUM_CONCURRENT_QUERIES > searchList->count?
        searchList->count: threadBlockStartIndex + AW_FM_NUM_CONCURRENT_QUERIES;
      struct AwFmSearchRange ranges[AW_FM_NUM_CONCURRENT_QUERIES];

      parallelSearchFindKmerSeedsForBlock(index, searchList, ranges, threadBlockStartIndex, threadBlockEndIndex);
      parallelSearchExtendKmersInBlock(index, searchList, ranges, threadBlockStartIndex, threadBlockEndIndex);
      parallelSearchTracebackPositionLists(index, searchList, ranges, threadBlockStartIndex, threadBlockEndIndex);
    }
  }
}

/* Like awFmParallelSearchLocate, but stops after the extend phase and only
 * stores per-kmer hit counts (range lengths), not positions. */
void awFmParallelSearchCount(const struct AwFmIndex *restrict const index,
  struct AwFmKmerSearchList *restrict const searchList, uint8_t numThreads){
  const uint32_t searchListCount = searchList->count;
  if(numThreads > 1){
    #pragma omp parallel for num_threads(numThreads)
    for(size_t threadBlockStartIndex = 0; threadBlockStartIndex < searchListCount;
      threadBlockStartIndex += AW_FM_NUM_CONCURRENT_QUERIES){
      const size_t threadBlockEndIndex = threadBlockStartIndex + AW_FM_NUM_CONCURRENT_QUERIES > searchList->count?
        searchList->count: threadBlockStartIndex + AW_FM_NUM_CONCURRENT_QUERIES;
      struct AwFmSearchRange ranges[AW_FM_NUM_CONCURRENT_QUERIES];

      parallelSearchFindKmerSeedsForBlock(index, searchList, ranges, threadBlockStartIndex, threadBlockEndIndex);
      parallelSearchExtendKmersInBlock(index, searchList, ranges, threadBlockStartIndex, threadBlockEndIndex);

      //load the range lengths into the count member variables.
      for(size_t i = threadBlockStartIndex; i < threadBlockEndIndex; i++){
        searchList->kmerSearchData[i].count = awFmSearchRangeLength(&ranges[i-threadBlockStartIndex]);
      }
    }
  }
  else{
    //exact duplicate of above code, without the omp pragma, so it doesn't kill performance with only 1 thread.
    for(size_t threadBlockStartIndex = 0; threadBlockStartIndex < searchListCount;
      threadBlockStartIndex += AW_FM_NUM_CONCURRENT_QUERIES){
      const size_t threadBlockEndIndex = threadBlockStartIndex + AW_FM_NUM_CONCURRENT_QUERIES > searchList->count?
        searchList->count: threadBlockStartIndex + AW_FM_NUM_CONCURRENT_QUERIES;
      struct AwFmSearchRange ranges[AW_FM_NUM_CONCURRENT_QUERIES];

      parallelSearchFindKmerSeedsForBlock(index, searchList, ranges, threadBlockStartIndex, threadBlockEndIndex);
      parallelSearchExtendKmersInBlock(index, searchList, ranges, threadBlockStartIndex, threadBlockEndIndex);

      //load the range lengths into the count member variables.
      for(size_t i = threadBlockStartIndex; i < threadBlockEndIndex; i++){
        searchList->kmerSearchData[i].count = awFmSearchRangeLength(&ranges[i-threadBlockStartIndex]);
      }
    }
  }
}

/* Seeds each kmer's range, either from the precomputed seed table (when the
 * kmer is at least the table's seed length) or via a non-seeded search. */
void parallelSearchFindKmerSeedsForBlock(const struct AwFmIndex *restrict const index,
  struct AwFmKmerSearchList *restrict const searchList, struct AwFmSearchRange *restrict const ranges,
  const size_t threadBlockStartIndex, const size_t threadBlockEndIndex){
  for(size_t kmerIndex = threadBlockStartIndex; kmerIndex < threadBlockEndIndex; kmerIndex++){
    const struct AwFmKmerSearchData *searchData = &searchList->kmerSearchData[kmerIndex];
    const uint8_t kmerLength = searchData->kmerLength;
    const char *kmerString = searchData->kmerString;
    const uint64_t rangesIndex = kmerIndex - threadBlockStartIndex;

    if(index->config.alphabetType == AwFmAlphabetNucleotide){
      //TODO: reimplement partial seeded search when it's implementable
      if(kmerLength < index->config.kmerLengthInSeedTable){
        awFmNucleotideNonSeededSearch(index, kmerString, kmerLength, &ranges[rangesIndex]);
      }
      else{
        ranges[rangesIndex] = awFmNucleotideKmerSeedRangeFromTable(index, kmerString, kmerLength);
      }
    }
    else{
      if(kmerLength < index->config.kmerLengthInSeedTable){
        awFmAminoNonSeededSearch(index, kmerString, kmerLength, &ranges[rangesIndex]);
      }
      else{
        ranges[rangesIndex] = awFmAminoKmerSeedRangeFromTable(index, kmerString, kmerLength);
      }
    }
  }
}

/* Extends all still-active ranges one backward-search step per outer pass,
 * interleaving the queries in the block; stops when every kmer is either
 * fully consumed or its range has become empty. */
void parallelSearchExtendKmersInBlock(const struct AwFmIndex *restrict const index,
  struct AwFmKmerSearchList *restrict const searchList, struct AwFmSearchRange *restrict const ranges,
  const size_t threadBlockStartIndex, const size_t threadBlockEndIndex){
  bool hasActiveQueries = true;
  /* letters up to kmerLengthInSeedTable were consumed by the seed phase */
  uint8_t currentKmerLetterIndex = index->config.kmerLengthInSeedTable;

  while(hasActiveQueries){
    currentKmerLetterIndex++;
    hasActiveQueries = false;

    for(size_t kmerIndex = threadBlockStartIndex; kmerIndex < threadBlockEndIndex; kmerIndex++){
      const uint64_t rangesIndex = kmerIndex - threadBlockStartIndex;
      const struct AwFmKmerSearchData *restrict const searchData = &searchList->kmerSearchData[kmerIndex];
      const uint8_t kmerLength = searchData->kmerLength;
      const char *kmerString = searchData->kmerString;

      if((kmerLength >= currentKmerLetterIndex) && awFmSearchRangeIsValid(&ranges[rangesIndex])){
        hasActiveQueries = true;
        /* backward search walks the kmer right-to-left */
        const uint8_t currentQueryLetterIndex = kmerLength - currentKmerLetterIndex;

        if(index->config.alphabetType == AwFmAlphabetNucleotide){
          const uint8_t queryLetterIndex = awFmAsciiNucleotideToLetterIndex(kmerString[currentQueryLetterIndex]);
          awFmNucleotideIterativeStepBackwardSearch(index, &ranges[rangesIndex], queryLetterIndex);
        }
        else{
          const uint8_t queryLetterIndex = awFmAsciiAminoAcidToLetterIndex(kmerString[currentQueryLetterIndex]);
          awFmAminoIterativeStepBackwardSearch(index, &ranges[rangesIndex], queryLetterIndex);
        }
      }
    }
  }
}

/* Resolves each final range to absolute sequence positions by walking the BWT
 * back to the nearest sampled suffix-array entry for every hit. */
void parallelSearchTracebackPositionLists(const struct AwFmIndex *restrict const index,
  struct AwFmKmerSearchList *restrict const searchList, struct AwFmSearchRange *restrict const ranges,
  const size_t threadBlockStartIndex, const size_t threadBlockEndIndex){
  for(size_t kmerIndex = threadBlockStartIndex; kmerIndex < threadBlockEndIndex; kmerIndex++){
    const uint64_t rangesIndex = kmerIndex - threadBlockStartIndex;
    struct AwFmKmerSearchData *searchData = &searchList->kmerSearchData[kmerIndex];
    const size_t rangeLength = awFmSearchRangeLength(&ranges[rangesIndex]);
    /* NOTE(review): the bool result of setPositionListCount is ignored here;
     * on realloc failure the loop below would write into the old, smaller
     * buffer — consider propagating the failure. */
    setPositionListCount(searchData, rangeLength);

    // struct AwFmBacktrace *restrict const backtracePositionList = {
    //   .position = searchList->kmerSearchData[kmerIndex].positionList;
    // };searchList->kmerSearchData[kmerIndex].positionBacktraceList;
    for(size_t indexOfPositionToBacktrace = 0; indexOfPositionToBacktrace < rangeLength; indexOfPositionToBacktrace++){
      //initialize the offset.
      struct AwFmBacktrace backtrace = {
        .position = ranges[rangesIndex].startPtr + indexOfPositionToBacktrace,
        .offset = 0
      };

      /* step backward through the BWT until a sampled SA position is reached */
      if(index->config.alphabetType == AwFmAlphabetNucleotide){
        while(!awFmBwtPositionIsSampled(index, backtrace.position)){
          backtrace.position = awFmNucleotideBacktraceBwtPosition(index, backtrace.position);
          backtrace.offset++;
        }
      }
      else{
        while(!awFmBwtPositionIsSampled(index, backtrace.position)){
          backtrace.position = awFmAminoBacktraceBwtPosition(index, backtrace.position);
          backtrace.offset++;
        }
      }

      awFmSuffixArrayReadPositionParallel(index, &backtrace);
      searchData->positionList[indexOfPositionToBacktrace] = backtrace.position;
    }
  }
}

/* Ensures capacity for newCount positions, growing via realloc if needed.
 * On success updates count (and possibly capacity/positionList) and returns
 * true; on allocation failure leaves the old buffer intact and returns false. */
bool setPositionListCount(struct AwFmKmerSearchData *restrict const searchData, uint32_t newCount){
  if(__builtin_expect(searchData->capacity >= newCount, 1)){
    searchData->count = newCount;
  }
  else{
    const size_t newCapacity = newCount;
    const size_t newLengthInBytes = newCapacity * sizeof(uint64_t);
    void *tmpPtr = realloc(searchData->positionList, newLengthInBytes);
    if(__builtin_expect(tmpPtr == 0, 0)){
      /* NOTE(review): fprintf needs <stdio.h>, presumably pulled in by one of
       * the project headers above — confirm. */
      fprintf(stderr, "Critical memory failure: could not allocate memory for position list.\n");
      return false;
    }
    searchData->capacity = newCapacity;
    searchData->count = newCount;
    searchData->positionList = tmpPtr;
  }
  return true;
}
kmp_csupport.c
/*
 * kmp_csupport.c -- kfront linkage support for OpenMP.
 */

//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

#include "omp.h"        /* extern "C" declarations of user-visible routines */
#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"
#include "kmp_error.h"
#include "kmp_stats.h"

#if OMPT_SUPPORT
#include "ompt-internal.h"
#include "ompt-specific.h"
#endif

#define MAX_MESSAGE 512

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

/* flags will be used in future, e.g., to implement */
/* openmp_strict library restrictions               */

/*!
 * @ingroup STARTUP_SHUTDOWN
 * @param loc   in   source location information
 * @param flags in   for future use (currently ignored)
 *
 * Initialize the runtime library. This call is optional; if it is not made
 * then it will be implicitly called by attempts to use other library
 * functions.
 */
void
__kmpc_begin(ident_t *loc, kmp_int32 flags)
{
    // By default __kmp_ignore_mppbeg() returns TRUE, so this entry point is a
    // no-op and initialization is deferred until the runtime is actually used.
    if (__kmp_ignore_mppbeg() == FALSE) {
        __kmp_internal_begin();

        KC_TRACE( 10, ("__kmpc_begin: called\n" ) );
    }
}

/*!
 * @ingroup STARTUP_SHUTDOWN
 * @param loc source location information
 *
 * Shutdown the runtime library. This is also optional, and even if called
 * will not do anything unless the `KMP_IGNORE_MPPEND` environment variable
 * is set to zero.
 */
void
__kmpc_end(ident_t *loc)
{
    // By default, __kmp_ignore_mppend() returns TRUE which makes __kmpc_end()
    // call no-op. However, this can be overridden with KMP_IGNORE_MPPEND
    // environment variable. If KMP_IGNORE_MPPEND is 0, __kmp_ignore_mppend()
    // returns FALSE and __kmpc_end() will unregister this root (it can cause
    // library shut down).
    if (__kmp_ignore_mppend() == FALSE) {
        KC_TRACE( 10, ("__kmpc_end: called\n" ) );
        KA_TRACE( 30, ("__kmpc_end\n" ));

        __kmp_internal_end_thread( -1 );
    }
}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The global thread index of the active thread.

This function can be called in any context.

If the runtime has only been entered at the outermost level from a
single (necessarily non-OpenMP<sup>*</sup>) thread, then the thread number is
that which would be returned by omp_get_thread_num() in the outermost
active parallel construct. (Or zero if there is no active parallel
construct, since the master thread is necessarily thread zero).

If multiple non-OpenMP threads all enter an OpenMP construct then this
will be a unique thread identifier among all the threads created by
the OpenMP runtime (but the value cannot be defined in terms of
OpenMP thread ids returned by omp_get_thread_num()).
*/
kmp_int32
__kmpc_global_thread_num(ident_t *loc)
{
    kmp_int32 gtid = __kmp_entry_gtid();

    KC_TRACE( 10, ("__kmpc_global_thread_num: T#%d\n", gtid ) );

    return gtid;
}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The number of threads under control of the OpenMP<sup>*</sup> runtime

This function can be called in any context.
It returns the total number of threads under the control of the OpenMP
runtime. That is not a number that can be determined by any OpenMP standard
calls, since the library may be called from more than one non-OpenMP thread,
and this reflects the total over all such calls. Similarly the runtime
maintains underlying threads even when they are not active (since the cost of
creating and destroying OS threads is high), this call counts all such
threads even if they are not waiting for work.
*/
kmp_int32
__kmpc_global_num_threads(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_global_num_threads: num_threads = %d\n", __kmp_nth ) );

    // TCR_4 is the runtime's 4-byte "thread-consistent read" of the shared
    // global thread count.
    return TCR_4(__kmp_nth);
}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The thread number of the calling thread in the innermost active
parallel construct.
*/
kmp_int32
__kmpc_bound_thread_num(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_bound_thread_num: called\n" ) );

    return __kmp_tid_from_gtid( __kmp_entry_gtid() );
}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The number of threads in the innermost active parallel construct.
*/
kmp_int32
__kmpc_bound_num_threads(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_bound_num_threads: called\n" ) );

    return __kmp_entry_thread() -> th.th_team -> t.t_nproc;
}

/*!
 * @ingroup DEPRECATED
 * @param loc location description
 *
 * This function need not be called. It always returns TRUE.
 *
 * (In debug builds, the KMP_PAR_RANGE filter below can veto forking for
 * regions outside a configured file/routine/line-number range; the source
 * location is parsed from the semicolon-separated loc->psource string.)
 */
kmp_int32
__kmpc_ok_to_fork(ident_t *loc)
{
#ifndef KMP_DEBUG

    return TRUE;

#else

    const char *semi2;
    const char *semi3;
    int line_no;

    // No par-range filter configured: always fork.
    if (__kmp_par_range == 0) {
        return TRUE;
    }
    semi2 = loc->psource;
    if (semi2 == NULL) {
        return TRUE;
    }
    // psource is ";file;routine;line;...": skip the first two fields.
    semi2 = strchr(semi2, ';');
    if (semi2 == NULL) {
        return TRUE;
    }
    semi2 = strchr(semi2 + 1, ';');
    if (semi2 == NULL) {
        return TRUE;
    }
    if (__kmp_par_range_filename[0]) {
        // Compare only the basename: scan back to the previous '/' or ';'.
        const char *name = semi2 - 1;
        while ((name > loc->psource) && (*name != '/') && (*name != ';')) {
            name--;
        }
        if ((*name == '/') || (*name == ';')) {
            name++;
        }
        if (strncmp(__kmp_par_range_filename, name, semi2 - name)) {
            return __kmp_par_range < 0;
        }
    }
    semi3 = strchr(semi2 + 1, ';');
    if (__kmp_par_range_routine[0]) {
        if ((semi3 != NULL) && (semi3 > semi2)
          && (strncmp(__kmp_par_range_routine, semi2 + 1, semi3 - semi2 - 1))) {
            return __kmp_par_range < 0;
        }
    }
    if (KMP_SSCANF(semi3 + 1, "%d", &line_no) == 1) {
        if ((line_no >= __kmp_par_range_lb) && (line_no <= __kmp_par_range_ub)) {
            return __kmp_par_range > 0;
        }
        return __kmp_par_range < 0;
    }
    // Malformed location string: fall back to allowing the fork.
    return TRUE;

#endif /* KMP_DEBUG */
}
/*! @ingroup THREAD_STATES @param loc Source location information. @return 1 if this thread is executing inside an active parallel region, zero if not. */ kmp_int32 __kmpc_in_parallel( ident_t *loc ) { return __kmp_entry_thread() -> th.th_root -> r.r_active; } /*! @ingroup PARALLEL @param loc source location information @param global_tid global thread number @param num_threads number of threads requested for this parallel construct Set the number of threads to be used by the next fork spawned by this thread. This call is only required if the parallel construct has a `num_threads` clause. */ void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_threads ) { KA_TRACE( 20, ("__kmpc_push_num_threads: enter T#%d num_threads=%d\n", global_tid, num_threads ) ); __kmp_push_num_threads( loc, global_tid, num_threads ); } void __kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid ) { KA_TRACE( 20, ("__kmpc_pop_num_threads: enter\n" ) ); /* the num_threads are automatically popped */ } #if OMP_40_ENABLED void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid, kmp_int32 proc_bind ) { KA_TRACE( 20, ("__kmpc_push_proc_bind: enter T#%d proc_bind=%d\n", global_tid, proc_bind ) ); __kmp_push_proc_bind( loc, global_tid, (kmp_proc_bind_t)proc_bind ); } #endif /* OMP_40_ENABLED */ /*! @ingroup PARALLEL @param loc source location information @param argc total number of arguments in the ellipsis @param microtask pointer to callback routine consisting of outlined parallel construct @param ... pointers to shared variables that aren't global Do the actual fork and call the microtask in the relevant number of threads. */ void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...) 
{ int gtid = __kmp_entry_gtid(); #if (KMP_STATS_ENABLED) int inParallel = __kmpc_in_parallel(loc); if (inParallel) { KMP_COUNT_BLOCK(OMP_NESTED_PARALLEL); } else { KMP_STOP_EXPLICIT_TIMER(OMP_serial); KMP_COUNT_BLOCK(OMP_PARALLEL); } #endif // maybe to save thr_state is enough here { va_list ap; va_start( ap, microtask ); #if OMPT_SUPPORT int tid = __kmp_tid_from_gtid( gtid ); kmp_info_t *master_th = __kmp_threads[ gtid ]; kmp_team_t *parent_team = master_th->th.th_team; if (ompt_enabled) { parent_team->t.t_implicit_task_taskdata[tid]. ompt_task_info.frame.reenter_runtime_frame = __builtin_frame_address(0); } #endif #if INCLUDE_SSC_MARKS SSC_MARK_FORKING(); #endif __kmp_fork_call( loc, gtid, fork_context_intel, argc, #if OMPT_SUPPORT VOLATILE_CAST(void *) microtask, // "unwrapped" task #endif VOLATILE_CAST(microtask_t) microtask, // "wrapped" task VOLATILE_CAST(launch_t) __kmp_invoke_task_func, /* TODO: revert workaround for Intel(R) 64 tracker #96 */ #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX &ap #else ap #endif ); #if INCLUDE_SSC_MARKS SSC_MARK_JOINING(); #endif __kmp_join_call( loc, gtid #if OMPT_SUPPORT , fork_context_intel #endif ); va_end( ap ); #if OMPT_SUPPORT if (ompt_enabled) { parent_team->t.t_implicit_task_taskdata[tid]. ompt_task_info.frame.reenter_runtime_frame = 0; } #endif } #if (KMP_STATS_ENABLED) if (!inParallel) KMP_START_EXPLICIT_TIMER(OMP_serial); #endif } #if OMP_40_ENABLED /*! @ingroup PARALLEL @param loc source location information @param global_tid global thread number @param num_teams number of teams requested for the teams construct @param num_threads number of threads per team requested for the teams construct Set the number of teams to be used by the teams construct. This call is only required if the teams construct has a `num_teams` clause or a `thread_limit` clause (or both). 
*/ void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_teams, kmp_int32 num_threads ) { KA_TRACE( 20, ("__kmpc_push_num_teams: enter T#%d num_teams=%d num_threads=%d\n", global_tid, num_teams, num_threads ) ); __kmp_push_num_teams( loc, global_tid, num_teams, num_threads ); } /*! @ingroup PARALLEL @param loc source location information @param argc total number of arguments in the ellipsis @param microtask pointer to callback routine consisting of outlined teams construct @param ... pointers to shared variables that aren't global Do the actual fork and call the microtask in the relevant number of threads. */ void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...) { int gtid = __kmp_entry_gtid(); kmp_info_t *this_thr = __kmp_threads[ gtid ]; va_list ap; va_start( ap, microtask ); KMP_COUNT_BLOCK(OMP_TEAMS); // remember teams entry point and nesting level this_thr->th.th_teams_microtask = microtask; this_thr->th.th_teams_level = this_thr->th.th_team->t.t_level; // AC: can be >0 on host #if OMPT_SUPPORT kmp_team_t *parent_team = this_thr->th.th_team; int tid = __kmp_tid_from_gtid( gtid ); if (ompt_enabled) { parent_team->t.t_implicit_task_taskdata[tid]. 
ompt_task_info.frame.reenter_runtime_frame = __builtin_frame_address(0); } #endif // check if __kmpc_push_num_teams called, set default number of teams otherwise if ( this_thr->th.th_teams_size.nteams == 0 ) { __kmp_push_num_teams( loc, gtid, 0, 0 ); } KMP_DEBUG_ASSERT(this_thr->th.th_set_nproc >= 1); KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nteams >= 1); KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nth >= 1); __kmp_fork_call( loc, gtid, fork_context_intel, argc, #if OMPT_SUPPORT VOLATILE_CAST(void *) microtask, // "unwrapped" task #endif VOLATILE_CAST(microtask_t) __kmp_teams_master, // "wrapped" task VOLATILE_CAST(launch_t) __kmp_invoke_teams_master, #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX &ap #else ap #endif ); __kmp_join_call( loc, gtid #if OMPT_SUPPORT , fork_context_intel #endif ); #if OMPT_SUPPORT if (ompt_enabled) { parent_team->t.t_implicit_task_taskdata[tid]. ompt_task_info.frame.reenter_runtime_frame = NULL; } #endif this_thr->th.th_teams_microtask = NULL; this_thr->th.th_teams_level = 0; *(kmp_int64*)(&this_thr->th.th_teams_size) = 0L; va_end( ap ); } #endif /* OMP_40_ENABLED */ // // I don't think this function should ever have been exported. // The __kmpc_ prefix was misapplied. I'm fairly certain that no generated // openmp code ever called it, but it's been exported from the RTL for so // long that I'm afraid to remove the definition. // int __kmpc_invoke_task_func( int gtid ) { return __kmp_invoke_task_func( gtid ); } /*! @ingroup PARALLEL @param loc source location information @param global_tid global thread number Enter a serialized parallel construct. This interface is used to handle a conditional parallel region, like this, @code #pragma omp parallel if (condition) @endcode when the condition is false. 
*/ void __kmpc_serialized_parallel(ident_t *loc, kmp_int32 global_tid) { __kmp_serialized_parallel(loc, global_tid); /* The implementation is now in kmp_runtime.c so that it can share static functions with * kmp_fork_call since the tasks to be done are similar in each case. */ } /*! @ingroup PARALLEL @param loc source location information @param global_tid global thread number Leave a serialized parallel construct. */ void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32 global_tid) { kmp_internal_control_t *top; kmp_info_t *this_thr; kmp_team_t *serial_team; KC_TRACE( 10, ("__kmpc_end_serialized_parallel: called by T#%d\n", global_tid ) ); /* skip all this code for autopar serialized loops since it results in unacceptable overhead */ if( loc != NULL && (loc->flags & KMP_IDENT_AUTOPAR ) ) return; // Not autopar code if( ! TCR_4( __kmp_init_parallel ) ) __kmp_parallel_initialize(); this_thr = __kmp_threads[ global_tid ]; serial_team = this_thr->th.th_serial_team; #if OMP_41_ENABLED kmp_task_team_t * task_team = this_thr->th.th_task_team; // we need to wait for the proxy tasks before finishing the thread if ( task_team != NULL && task_team->tt.tt_found_proxy_tasks ) __kmp_task_team_wait(this_thr, serial_team, NULL ); // is an ITT object needed here? 
#endif KMP_MB(); KMP_DEBUG_ASSERT( serial_team ); KMP_ASSERT( serial_team -> t.t_serialized ); KMP_DEBUG_ASSERT( this_thr -> th.th_team == serial_team ); KMP_DEBUG_ASSERT( serial_team != this_thr->th.th_root->r.r_root_team ); KMP_DEBUG_ASSERT( serial_team -> t.t_threads ); KMP_DEBUG_ASSERT( serial_team -> t.t_threads[0] == this_thr ); /* If necessary, pop the internal control stack values and replace the team values */ top = serial_team -> t.t_control_stack_top; if ( top && top -> serial_nesting_level == serial_team -> t.t_serialized ) { copy_icvs( &serial_team -> t.t_threads[0] -> th.th_current_task -> td_icvs, top ); serial_team -> t.t_control_stack_top = top -> next; __kmp_free(top); } //if( serial_team -> t.t_serialized > 1 ) serial_team -> t.t_level--; /* pop dispatch buffers stack */ KMP_DEBUG_ASSERT(serial_team->t.t_dispatch->th_disp_buffer); { dispatch_private_info_t * disp_buffer = serial_team->t.t_dispatch->th_disp_buffer; serial_team->t.t_dispatch->th_disp_buffer = serial_team->t.t_dispatch->th_disp_buffer->next; __kmp_free( disp_buffer ); } -- serial_team -> t.t_serialized; if ( serial_team -> t.t_serialized == 0 ) { /* return to the parallel section */ #if KMP_ARCH_X86 || KMP_ARCH_X86_64 if ( __kmp_inherit_fp_control && serial_team->t.t_fp_control_saved ) { __kmp_clear_x87_fpu_status_word(); __kmp_load_x87_fpu_control_word( &serial_team->t.t_x87_fpu_control_word ); __kmp_load_mxcsr( &serial_team->t.t_mxcsr ); } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ this_thr -> th.th_team = serial_team -> t.t_parent; this_thr -> th.th_info.ds.ds_tid = serial_team -> t.t_master_tid; /* restore values cached in the thread */ this_thr -> th.th_team_nproc = serial_team -> t.t_parent -> t.t_nproc; /* JPH */ this_thr -> th.th_team_master = serial_team -> t.t_parent -> t.t_threads[0]; /* JPH */ this_thr -> th.th_team_serialized = this_thr -> th.th_team -> t.t_serialized; /* TODO the below shouldn't need to be adjusted for serialized teams */ this_thr -> th.th_dispatch = 
& this_thr -> th.th_team -> t.t_dispatch[ serial_team -> t.t_master_tid ]; __kmp_pop_current_task_from_thread( this_thr ); KMP_ASSERT( this_thr -> th.th_current_task -> td_flags.executing == 0 ); this_thr -> th.th_current_task -> td_flags.executing = 1; if ( __kmp_tasking_mode != tskm_immediate_exec ) { // Copy the task team from the new child / old parent team to the thread. this_thr->th.th_task_team = this_thr->th.th_team->t.t_task_team[this_thr->th.th_task_state]; KA_TRACE( 20, ( "__kmpc_end_serialized_parallel: T#%d restoring task_team %p / team %p\n", global_tid, this_thr -> th.th_task_team, this_thr -> th.th_team ) ); } } else { if ( __kmp_tasking_mode != tskm_immediate_exec ) { KA_TRACE( 20, ( "__kmpc_end_serialized_parallel: T#%d decreasing nesting depth of serial team %p to %d\n", global_tid, serial_team, serial_team -> t.t_serialized ) ); } } #if USE_ITT_BUILD kmp_uint64 cur_time = 0; #if USE_ITT_NOTIFY if ( __itt_get_timestamp_ptr ) { cur_time = __itt_get_timestamp(); } #endif /* USE_ITT_NOTIFY */ if ( this_thr->th.th_team->t.t_level == 0 #if OMP_40_ENABLED && this_thr->th.th_teams_microtask == NULL #endif ) { // Report the barrier this_thr->th.th_ident = loc; if ( ( __itt_frame_submit_v3_ptr || KMP_ITT_DEBUG ) && ( __kmp_forkjoin_frames_mode == 3 || __kmp_forkjoin_frames_mode == 1 ) ) { __kmp_itt_frame_submit( global_tid, this_thr->th.th_frame_time_serialized, cur_time, 0, loc, this_thr->th.th_team_nproc, 0 ); if ( __kmp_forkjoin_frames_mode == 3 ) // Since barrier frame for serialized region is equal to the region we use the same begin timestamp as for the barrier. __kmp_itt_frame_submit( global_tid, serial_team->t.t_region_time, cur_time, 0, loc, this_thr->th.th_team_nproc, 2 ); } else if ( ( __itt_frame_end_v3_ptr || KMP_ITT_DEBUG ) && ! __kmp_forkjoin_frames_mode && __kmp_forkjoin_frames ) // Mark the end of the "parallel" region for VTune. Only use one of frame notification scheme at the moment. 
__kmp_itt_region_joined( global_tid, 1 ); } #endif /* USE_ITT_BUILD */ if ( __kmp_env_consistency_check ) __kmp_pop_parallel( global_tid, NULL ); } /*! @ingroup SYNCHRONIZATION @param loc source location information. Execute <tt>flush</tt>. This is implemented as a full memory fence. (Though depending on the memory ordering convention obeyed by the compiler even that may not be necessary). */ void __kmpc_flush(ident_t *loc) { KC_TRACE( 10, ("__kmpc_flush: called\n" ) ); /* need explicit __mf() here since use volatile instead in library */ KMP_MB(); /* Flush all pending memory write invalidates. */ #if ( KMP_ARCH_X86 || KMP_ARCH_X86_64 ) #if KMP_MIC // fence-style instructions do not exist, but lock; xaddl $0,(%rsp) can be used. // We shouldn't need it, though, since the ABI rules require that // * If the compiler generates NGO stores it also generates the fence // * If users hand-code NGO stores they should insert the fence // therefore no incomplete unordered stores should be visible. #else // C74404 // This is to address non-temporal store instructions (sfence needed). // The clflush instruction is addressed either (mfence needed). // Probably the non-temporal load monvtdqa instruction should also be addressed. // mfence is a SSE2 instruction. Do not execute it if CPU is not SSE2. if ( ! __kmp_cpuinfo.initialized ) { __kmp_query_cpuid( & __kmp_cpuinfo ); }; // if if ( ! __kmp_cpuinfo.sse2 ) { // CPU cannot execute SSE2 instructions. } else { #if KMP_COMPILER_ICC || KMP_COMPILER_MSVC _mm_mfence(); #else __sync_synchronize(); #endif // KMP_COMPILER_ICC }; // if #endif // KMP_MIC #elif (KMP_ARCH_ARM || KMP_ARCH_AARCH64) // Nothing to see here move along #elif KMP_ARCH_PPC64 // Nothing needed here (we have a real MB above). #if KMP_OS_CNK // The flushing thread needs to yield here; this prevents a // busy-waiting thread from saturating the pipeline. 
flush is // often used in loops like this: // while (!flag) { // #pragma omp flush(flag) // } // and adding the yield here is good for at least a 10x speedup // when running >2 threads per core (on the NAS LU benchmark). __kmp_yield(TRUE); #endif #else #error Unknown or unsupported architecture #endif } /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ /*! @ingroup SYNCHRONIZATION @param loc source location information @param global_tid thread id. Execute a barrier. */ void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid) { KMP_COUNT_BLOCK(OMP_BARRIER); KMP_TIME_BLOCK(OMP_barrier); KC_TRACE( 10, ("__kmpc_barrier: called T#%d\n", global_tid ) ); if (! TCR_4(__kmp_init_parallel)) __kmp_parallel_initialize(); if ( __kmp_env_consistency_check ) { if ( loc == 0 ) { KMP_WARNING( ConstructIdentInvalid ); // ??? What does it mean for the user? }; // if __kmp_check_barrier( global_tid, ct_barrier, loc ); } __kmp_threads[ global_tid ]->th.th_ident = loc; // TODO: explicit barrier_wait_id: // this function is called when 'barrier' directive is present or // implicit barrier at the end of a worksharing construct. // 1) better to add a per-thread barrier counter to a thread data structure // 2) set to 0 when a new team is created // 4) no sync is required __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL ); } /* The BARRIER for a MASTER section is always explicit */ /*! @ingroup WORK_SHARING @param loc source location information. @param global_tid global thread number . @return 1 if this thread should execute the <tt>master</tt> block, 0 otherwise. */ kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid) { KMP_COUNT_BLOCK(OMP_MASTER); int status = 0; KC_TRACE( 10, ("__kmpc_master: called T#%d\n", global_tid ) ); if( ! 
TCR_4( __kmp_init_parallel ) ) __kmp_parallel_initialize(); if( KMP_MASTER_GTID( global_tid )) { KMP_START_EXPLICIT_TIMER(OMP_master); status = 1; } #if OMPT_SUPPORT && OMPT_TRACE if (status) { if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_master_begin)) { kmp_info_t *this_thr = __kmp_threads[ global_tid ]; kmp_team_t *team = this_thr -> th.th_team; int tid = __kmp_tid_from_gtid( global_tid ); ompt_callbacks.ompt_callback(ompt_event_master_begin)( team->t.ompt_team_info.parallel_id, team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id); } } #endif if ( __kmp_env_consistency_check ) { #if KMP_USE_DYNAMIC_LOCK if (status) __kmp_push_sync( global_tid, ct_master, loc, NULL, 0 ); else __kmp_check_sync( global_tid, ct_master, loc, NULL, 0 ); #else if (status) __kmp_push_sync( global_tid, ct_master, loc, NULL ); else __kmp_check_sync( global_tid, ct_master, loc, NULL ); #endif } return status; } /*! @ingroup WORK_SHARING @param loc source location information. @param global_tid global thread number . Mark the end of a <tt>master</tt> region. This should only be called by the thread that executes the <tt>master</tt> region. */ void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid) { KC_TRACE( 10, ("__kmpc_end_master: called T#%d\n", global_tid ) ); KMP_DEBUG_ASSERT( KMP_MASTER_GTID( global_tid )); KMP_STOP_EXPLICIT_TIMER(OMP_master); #if OMPT_SUPPORT && OMPT_TRACE kmp_info_t *this_thr = __kmp_threads[ global_tid ]; kmp_team_t *team = this_thr -> th.th_team; if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_master_end)) { int tid = __kmp_tid_from_gtid( global_tid ); ompt_callbacks.ompt_callback(ompt_event_master_end)( team->t.ompt_team_info.parallel_id, team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id); } #endif if ( __kmp_env_consistency_check ) { if( global_tid < 0 ) KMP_WARNING( ThreadIdentInvalid ); if( KMP_MASTER_GTID( global_tid )) __kmp_pop_sync( global_tid, ct_master, loc ); } } /*! 
@ingroup WORK_SHARING @param loc source location information. @param gtid global thread number. Start execution of an <tt>ordered</tt> construct. */ void __kmpc_ordered( ident_t * loc, kmp_int32 gtid ) { int cid = 0; kmp_info_t *th; KMP_DEBUG_ASSERT( __kmp_init_serial ); KC_TRACE( 10, ("__kmpc_ordered: called T#%d\n", gtid )); if (! TCR_4(__kmp_init_parallel)) __kmp_parallel_initialize(); #if USE_ITT_BUILD __kmp_itt_ordered_prep( gtid ); // TODO: ordered_wait_id #endif /* USE_ITT_BUILD */ th = __kmp_threads[ gtid ]; #if OMPT_SUPPORT && OMPT_TRACE if (ompt_enabled) { /* OMPT state update */ th->th.ompt_thread_info.wait_id = (uint64_t) loc; th->th.ompt_thread_info.state = ompt_state_wait_ordered; /* OMPT event callback */ if (ompt_callbacks.ompt_callback(ompt_event_wait_ordered)) { ompt_callbacks.ompt_callback(ompt_event_wait_ordered)( th->th.ompt_thread_info.wait_id); } } #endif if ( th -> th.th_dispatch -> th_deo_fcn != 0 ) (*th->th.th_dispatch->th_deo_fcn)( & gtid, & cid, loc ); else __kmp_parallel_deo( & gtid, & cid, loc ); #if OMPT_SUPPORT && OMPT_TRACE if (ompt_enabled) { /* OMPT state update */ th->th.ompt_thread_info.state = ompt_state_work_parallel; th->th.ompt_thread_info.wait_id = 0; /* OMPT event callback */ if (ompt_callbacks.ompt_callback(ompt_event_acquired_ordered)) { ompt_callbacks.ompt_callback(ompt_event_acquired_ordered)( th->th.ompt_thread_info.wait_id); } } #endif #if USE_ITT_BUILD __kmp_itt_ordered_start( gtid ); #endif /* USE_ITT_BUILD */ } /*! @ingroup WORK_SHARING @param loc source location information. @param gtid global thread number. End execution of an <tt>ordered</tt> construct. 
*/ void __kmpc_end_ordered( ident_t * loc, kmp_int32 gtid ) { int cid = 0; kmp_info_t *th; KC_TRACE( 10, ("__kmpc_end_ordered: called T#%d\n", gtid ) ); #if USE_ITT_BUILD __kmp_itt_ordered_end( gtid ); // TODO: ordered_wait_id #endif /* USE_ITT_BUILD */ th = __kmp_threads[ gtid ]; if ( th -> th.th_dispatch -> th_dxo_fcn != 0 ) (*th->th.th_dispatch->th_dxo_fcn)( & gtid, & cid, loc ); else __kmp_parallel_dxo( & gtid, & cid, loc ); #if OMPT_SUPPORT && OMPT_BLAME if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_release_ordered)) { ompt_callbacks.ompt_callback(ompt_event_release_ordered)( th->th.ompt_thread_info.wait_id); } #endif } #if KMP_USE_DYNAMIC_LOCK static __forceinline void __kmp_init_indirect_csptr(kmp_critical_name * crit, ident_t const * loc, kmp_int32 gtid, kmp_indirect_locktag_t tag) { // Pointer to the allocated indirect lock is written to crit, while indexing is ignored. void *idx; kmp_indirect_lock_t **lck; lck = (kmp_indirect_lock_t **)crit; kmp_indirect_lock_t *ilk = __kmp_allocate_indirect_lock(&idx, gtid, tag); KMP_I_LOCK_FUNC(ilk, init)(ilk->lock); KMP_SET_I_LOCK_LOCATION(ilk, loc); KMP_SET_I_LOCK_FLAGS(ilk, kmp_lf_critical_section); KA_TRACE(20, ("__kmp_init_indirect_csptr: initialized indirect lock #%d\n", tag)); #if USE_ITT_BUILD __kmp_itt_critical_creating(ilk->lock, loc); #endif int status = KMP_COMPARE_AND_STORE_PTR(lck, 0, ilk); if (status == 0) { #if USE_ITT_BUILD __kmp_itt_critical_destroyed(ilk->lock); #endif // We don't really need to destroy the unclaimed lock here since it will be cleaned up at program exit. //KMP_D_LOCK_FUNC(&idx, destroy)((kmp_dyna_lock_t *)&idx); } KMP_DEBUG_ASSERT(*lck != NULL); } // Fast-path acquire tas lock #define KMP_ACQUIRE_TAS_LOCK(lock, gtid) { \ kmp_tas_lock_t *l = (kmp_tas_lock_t *)lock; \ if (l->lk.poll != KMP_LOCK_FREE(tas) || \ ! 
KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas))) { \ kmp_uint32 spins; \ KMP_FSYNC_PREPARE(l); \ KMP_INIT_YIELD(spins); \ if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) { \ KMP_YIELD(TRUE); \ } else { \ KMP_YIELD_SPIN(spins); \ } \ while (l->lk.poll != KMP_LOCK_FREE(tas) || \ ! KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas))) { \ if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) { \ KMP_YIELD(TRUE); \ } else { \ KMP_YIELD_SPIN(spins); \ } \ } \ } \ KMP_FSYNC_ACQUIRED(l); \ } // Fast-path test tas lock #define KMP_TEST_TAS_LOCK(lock, gtid, rc) { \ kmp_tas_lock_t *l = (kmp_tas_lock_t *)lock; \ rc = l->lk.poll == KMP_LOCK_FREE(tas) && \ KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas)); \ } // Fast-path release tas lock #define KMP_RELEASE_TAS_LOCK(lock, gtid) { \ TCW_4(((kmp_tas_lock_t *)lock)->lk.poll, KMP_LOCK_FREE(tas)); \ KMP_MB(); \ } #if KMP_USE_FUTEX # include <unistd.h> # include <sys/syscall.h> # ifndef FUTEX_WAIT # define FUTEX_WAIT 0 # endif # ifndef FUTEX_WAKE # define FUTEX_WAKE 1 # endif // Fast-path acquire futex lock #define KMP_ACQUIRE_FUTEX_LOCK(lock, gtid) { \ kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock; \ kmp_int32 gtid_code = (gtid+1) << 1; \ KMP_MB(); \ KMP_FSYNC_PREPARE(ftx); \ kmp_int32 poll_val; \ while ((poll_val = KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), KMP_LOCK_FREE(futex), \ KMP_LOCK_BUSY(gtid_code, futex))) != KMP_LOCK_FREE(futex)) { \ kmp_int32 cond = KMP_LOCK_STRIP(poll_val) & 1; \ if (!cond) { \ if (!KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), poll_val, poll_val | KMP_LOCK_BUSY(1, futex))) { \ continue; \ } \ poll_val |= KMP_LOCK_BUSY(1, futex); \ } \ kmp_int32 rc; \ if ((rc = syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAIT, poll_val, NULL, NULL, 0)) != 0) { \ continue; \ } \ gtid_code |= 1; \ } \ KMP_FSYNC_ACQUIRED(ftx); \ } // Fast-path test 
futex lock #define KMP_TEST_FUTEX_LOCK(lock, gtid, rc) { \ kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock; \ if (KMP_COMPARE_AND_STORE_ACQ32(&(ftx->lk.poll), KMP_LOCK_FREE(futex), KMP_LOCK_BUSY(gtid+1, futex) << 1)) { \ KMP_FSYNC_ACQUIRED(ftx); \ rc = TRUE; \ } else { \ rc = FALSE; \ } \ } // Fast-path release futex lock #define KMP_RELEASE_FUTEX_LOCK(lock, gtid) { \ kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock; \ KMP_MB(); \ KMP_FSYNC_RELEASING(ftx); \ kmp_int32 poll_val = KMP_XCHG_FIXED32(&(ftx->lk.poll), KMP_LOCK_FREE(futex)); \ if (KMP_LOCK_STRIP(poll_val) & 1) { \ syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex), NULL, NULL, 0); \ } \ KMP_MB(); \ KMP_YIELD(TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)); \ } #endif // KMP_USE_FUTEX #else // KMP_USE_DYNAMIC_LOCK static kmp_user_lock_p __kmp_get_critical_section_ptr( kmp_critical_name * crit, ident_t const * loc, kmp_int32 gtid ) { kmp_user_lock_p *lck_pp = (kmp_user_lock_p *)crit; // // Because of the double-check, the following load // doesn't need to be volatile. // kmp_user_lock_p lck = (kmp_user_lock_p)TCR_PTR( *lck_pp ); if ( lck == NULL ) { void * idx; // Allocate & initialize the lock. // Remember allocated locks in table in order to free them in __kmp_cleanup() lck = __kmp_user_lock_allocate( &idx, gtid, kmp_lf_critical_section ); __kmp_init_user_lock_with_checks( lck ); __kmp_set_user_lock_location( lck, loc ); #if USE_ITT_BUILD __kmp_itt_critical_creating( lck ); // __kmp_itt_critical_creating() should be called *before* the first usage of underlying // lock. It is the only place where we can guarantee it. There are chances the lock will // destroyed with no usage, but it is not a problem, because this is not real event seen // by user but rather setting name for object (lock). See more details in kmp_itt.h. #endif /* USE_ITT_BUILD */ // // Use a cmpxchg instruction to slam the start of the critical // section with the lock pointer. 
If another thread beat us // to it, deallocate the lock, and use the lock that the other // thread allocated. // int status = KMP_COMPARE_AND_STORE_PTR( lck_pp, 0, lck ); if ( status == 0 ) { // Deallocate the lock and reload the value. #if USE_ITT_BUILD __kmp_itt_critical_destroyed( lck ); // Let ITT know the lock is destroyed and the same memory location may be reused for // another purpose. #endif /* USE_ITT_BUILD */ __kmp_destroy_user_lock_with_checks( lck ); __kmp_user_lock_free( &idx, gtid, lck ); lck = (kmp_user_lock_p)TCR_PTR( *lck_pp ); KMP_DEBUG_ASSERT( lck != NULL ); } } return lck; } #endif // KMP_USE_DYNAMIC_LOCK /*! @ingroup WORK_SHARING @param loc source location information. @param global_tid global thread number . @param crit identity of the critical section. This could be a pointer to a lock associated with the critical section, or some other suitably unique value. Enter code protected by a `critical` construct. This function blocks until the executing thread can enter the critical section. 
*/
// Enter a `critical` region: resolve 'crit' to a runtime lock object and acquire it.
// With dynamic locks enabled this simply defers to the hinted variant with no hint.
void
__kmpc_critical( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit )
{
#if KMP_USE_DYNAMIC_LOCK
    __kmpc_critical_with_hint(loc, global_tid, crit, omp_lock_hint_none);
#else
    KMP_COUNT_BLOCK(OMP_CRITICAL);
    kmp_user_lock_p lck;

    KC_TRACE( 10, ("__kmpc_critical: called T#%d\n", global_tid ) );

    //TODO: add THR_OVHD_STATE

    KMP_CHECK_USER_LOCK_INIT();

    // If the lock object fits inside the user-visible critical name, use that
    // storage directly; otherwise look up / allocate an external lock object.
    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
        lck = (kmp_user_lock_p)crit;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
        lck = (kmp_user_lock_p)crit;
    }
#endif
    else { // ticket, queuing or drdpa
        lck = __kmp_get_critical_section_ptr( crit, loc, global_tid );
    }

    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_critical, loc, lck );

    /* since the critical directive binds to all threads, not just
     * the current team we have to check this even if we are in a
     * serialized team */
    /* also, even if we are the uber thread, we still have to conduct the lock,
     * as we have to contend with sibling threads */

#if USE_ITT_BUILD
    __kmp_itt_critical_acquiring( lck );
#endif /* USE_ITT_BUILD */
    // Value of 'crit' should be good for using as a critical_id of the critical section directive.
    __kmp_acquire_user_lock_with_checks( lck, global_tid );

#if USE_ITT_BUILD
    __kmp_itt_critical_acquired( lck );
#endif /* USE_ITT_BUILD */

    KA_TRACE( 15, ("__kmpc_critical: done T#%d\n", global_tid ));
#endif // KMP_USE_DYNAMIC_LOCK
}

#if KMP_USE_DYNAMIC_LOCK

// Converts the given hint to an internal lock implementation (lock sequence).
// Conflicting or unrecognized hint combinations fall back to the default
// __kmp_user_lock_seq; TSX-based choices additionally require RTM support.
static __forceinline kmp_dyna_lockseq_t
__kmp_map_hint_to_lock(uintptr_t hint)
{
#if KMP_USE_TSX
# define KMP_TSX_LOCK(seq) lockseq_##seq
#else
# define KMP_TSX_LOCK(seq) __kmp_user_lock_seq
#endif
    // Hints that do not require further logic
    if (hint & kmp_lock_hint_hle)
        return KMP_TSX_LOCK(hle);
    if (hint & kmp_lock_hint_rtm)
        return (__kmp_cpuinfo.rtm)? KMP_TSX_LOCK(rtm): __kmp_user_lock_seq;
    if (hint & kmp_lock_hint_adaptive)
        return (__kmp_cpuinfo.rtm)? KMP_TSX_LOCK(adaptive): __kmp_user_lock_seq;

    // Rule out conflicting hints first by returning the default lock
    if ((hint & omp_lock_hint_contended) && (hint & omp_lock_hint_uncontended))
        return __kmp_user_lock_seq;
    if ((hint & omp_lock_hint_speculative) && (hint & omp_lock_hint_nonspeculative))
        return __kmp_user_lock_seq;

    // Do not even consider speculation when it appears to be contended
    if (hint & omp_lock_hint_contended)
        return lockseq_queuing;

    // Uncontended lock without speculation
    if ((hint & omp_lock_hint_uncontended) && !(hint & omp_lock_hint_speculative))
        return lockseq_tas;

    // HLE lock for speculation
    if (hint & omp_lock_hint_speculative)
        return KMP_TSX_LOCK(hle);

    return __kmp_user_lock_seq;
}

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param global_tid  global thread number.
@param crit identity of the critical section. This could be a pointer to a lock associated with the critical section,
or some other suitably unique value.
@param hint the lock hint.

Enter code protected by a `critical` construct with a hint. The hint value is used to suggest a lock implementation.
This function blocks until the executing thread can enter the critical section unless the hint suggests use of
speculative execution and the hardware supports it.
*/
void
__kmpc_critical_with_hint( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit, uintptr_t hint )
{
    KMP_COUNT_BLOCK(OMP_CRITICAL);
    kmp_user_lock_p lck;

    KC_TRACE( 10, ("__kmpc_critical: called T#%d\n", global_tid ) );

    kmp_dyna_lock_t *lk = (kmp_dyna_lock_t *)crit;
    // Check if it is initialized. First thread through initializes the lock
    // word (direct lock) or the indirect-lock pointer stored in 'crit'.
    if (*lk == 0) {
        kmp_dyna_lockseq_t lckseq = __kmp_map_hint_to_lock(hint);
        if (KMP_IS_D_LOCK(lckseq)) {
            KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)crit, 0, KMP_GET_D_TAG(lckseq));
        } else {
            __kmp_init_indirect_csptr(crit, loc, global_tid, KMP_GET_I_TAG(lckseq));
        }
    }
    // Branch for accessing the actual lock object and set operation. This branching is inevitable since
    // this lock initialization does not follow the normal dispatch path (lock table is not used).
    if (KMP_EXTRACT_D_TAG(lk) != 0) {
        // Direct lock: the lock word itself lives in 'crit'.
        lck = (kmp_user_lock_p)lk;
        if (__kmp_env_consistency_check) {
            __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_map_hint_to_lock(hint));
        }
# if USE_ITT_BUILD
        __kmp_itt_critical_acquiring(lck);
# endif
# if KMP_USE_INLINED_TAS
        if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) {
            KMP_ACQUIRE_TAS_LOCK(lck, global_tid);
        } else
# elif KMP_USE_INLINED_FUTEX
        if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) {
            KMP_ACQUIRE_FUTEX_LOCK(lck, global_tid);
        } else
# endif
        {
            KMP_D_LOCK_FUNC(lk, set)(lk, global_tid);
        }
    } else {
        // Indirect lock: 'crit' holds a pointer to the lock descriptor.
        kmp_indirect_lock_t *ilk = *((kmp_indirect_lock_t **)lk);
        lck = ilk->lock;
        if (__kmp_env_consistency_check) {
            __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_map_hint_to_lock(hint));
        }
# if USE_ITT_BUILD
        __kmp_itt_critical_acquiring(lck);
# endif
        KMP_I_LOCK_FUNC(ilk, set)(lck, global_tid);
    }

#if USE_ITT_BUILD
    __kmp_itt_critical_acquired( lck );
#endif /* USE_ITT_BUILD */

    KA_TRACE( 15, ("__kmpc_critical: done T#%d\n", global_tid ));
} // __kmpc_critical_with_hint

#endif // KMP_USE_DYNAMIC_LOCK

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param global_tid  global thread number .
@param crit identity of the critical section. This could be a pointer to a lock associated with the critical section,
or some other suitably unique value.

Leave a critical section, releasing any lock that was held during its execution.
*/
void
__kmpc_end_critical(ident_t *loc, kmp_int32 global_tid, kmp_critical_name *crit)
{
    kmp_user_lock_p lck;

    KC_TRACE( 10, ("__kmpc_end_critical: called T#%d\n", global_tid ));

#if KMP_USE_DYNAMIC_LOCK
    // NOTE(review): this path keys off the global __kmp_user_lock_seq rather
    // than the tag actually stored in 'crit' by __kmpc_critical_with_hint;
    // presumably the two agree for the default (no-hint) case — verify.
    if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
        lck = (kmp_user_lock_p)crit;
        KMP_ASSERT(lck != NULL);
        if (__kmp_env_consistency_check) {
            __kmp_pop_sync(global_tid, ct_critical, loc);
        }
# if USE_ITT_BUILD
        __kmp_itt_critical_releasing( lck );
# endif
# if KMP_USE_INLINED_TAS
        if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) {
            KMP_RELEASE_TAS_LOCK(lck, global_tid);
        } else
# elif KMP_USE_INLINED_FUTEX
        if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) {
            KMP_RELEASE_FUTEX_LOCK(lck, global_tid);
        } else
# endif
        {
            KMP_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
        }
    } else {
        kmp_indirect_lock_t *ilk = (kmp_indirect_lock_t *)TCR_PTR(*((kmp_indirect_lock_t **)crit));
        KMP_ASSERT(ilk != NULL);
        lck = ilk->lock;
        if (__kmp_env_consistency_check) {
            __kmp_pop_sync(global_tid, ct_critical, loc);
        }
# if USE_ITT_BUILD
        __kmp_itt_critical_releasing( lck );
# endif
        KMP_I_LOCK_FUNC(ilk, unset)(lck, global_tid);
    }

#else // KMP_USE_DYNAMIC_LOCK

    // Mirror of the lock-resolution logic in __kmpc_critical.
    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
        lck = (kmp_user_lock_p)crit;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
        lck = (kmp_user_lock_p)crit;
    }
#endif
    else { // ticket, queuing or drdpa
        lck = (kmp_user_lock_p) TCR_PTR(*((kmp_user_lock_p *)crit));
    }

    KMP_ASSERT(lck != NULL);

    if ( __kmp_env_consistency_check )
        __kmp_pop_sync( global_tid, ct_critical, loc );

#if USE_ITT_BUILD
    __kmp_itt_critical_releasing( lck );
#endif /* USE_ITT_BUILD */
    // Value of 'crit' should be good for using as a critical_id of the critical section directive.
    __kmp_release_user_lock_with_checks( lck, global_tid );

#if OMPT_SUPPORT && OMPT_BLAME
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_release_critical)) {
        ompt_callbacks.ompt_callback(ompt_event_release_critical)(
            (uint64_t) lck);
    }
#endif

#endif // KMP_USE_DYNAMIC_LOCK

    KA_TRACE( 15, ("__kmpc_end_critical: done T#%d\n", global_tid ));
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
@return one if the thread should execute the master block, zero otherwise

Start execution of a combined barrier and master. The barrier is executed inside this function.
*/
kmp_int32
__kmpc_barrier_master(ident_t *loc, kmp_int32 global_tid)
{
    int status;

    KC_TRACE( 10, ("__kmpc_barrier_master: called T#%d\n", global_tid ) );

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    if ( __kmp_env_consistency_check )
        __kmp_check_barrier( global_tid, ct_barrier, loc );

#if USE_ITT_NOTIFY
    __kmp_threads[global_tid]->th.th_ident = loc;
#endif
    // __kmp_barrier returns non-zero only on one thread; invert that into
    // the "should I run the master block" flag.
    status = __kmp_barrier( bs_plain_barrier, global_tid, TRUE, 0, NULL, NULL );

    return (status != 0) ? 0 : 1;
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.

Complete the execution of a combined barrier and master. This function should
only be called at the completion of the <tt>master</tt> code. Other threads will
still be waiting at the barrier and this call releases them.
*/
void
__kmpc_end_barrier_master(ident_t *loc, kmp_int32 global_tid)
{
    KC_TRACE( 10, ("__kmpc_end_barrier_master: called T#%d\n", global_tid ));

    __kmp_end_split_barrier ( bs_plain_barrier, global_tid );
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
@return one if the thread should execute the master block, zero otherwise

Start execution of a combined barrier and master(nowait) construct.
The barrier is executed inside this function.
There is no equivalent "end" function, since the bookkeeping normally done by
__kmpc_end_master (the consistency-check pop) is performed here instead.
*/
kmp_int32
__kmpc_barrier_master_nowait( ident_t * loc, kmp_int32 global_tid )
{
    kmp_int32 ret;

    KC_TRACE( 10, ("__kmpc_barrier_master_nowait: called T#%d\n", global_tid ));

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    if ( __kmp_env_consistency_check ) {
        if ( loc == 0 ) {
            KMP_WARNING( ConstructIdentInvalid ); // ??? What does it mean for the user?
        }
        __kmp_check_barrier( global_tid, ct_barrier, loc );
    }

#if USE_ITT_NOTIFY
    __kmp_threads[global_tid]->th.th_ident = loc;
#endif
    __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

    // After the barrier, select the master thread for the master part.
    ret = __kmpc_master (loc, global_tid);

    if ( __kmp_env_consistency_check ) {
        /*  there's no __kmpc_end_master called; so the (stats) */
        /*  actions of __kmpc_end_master are done here          */

        if ( global_tid < 0 ) {
            KMP_WARNING( ThreadIdentInvalid );
        }
        if (ret) {
            /* only one thread should do the pop since only */
            /* one did the push (see __kmpc_master())       */

            __kmp_pop_sync( global_tid, ct_master, loc );
        }
    }

    return (ret);
}

/* The BARRIER for a SINGLE process section is always explicit   */
/*!
@ingroup WORK_SHARING
@param loc  source location information
@param global_tid  global thread number
@return One if this thread should execute the single construct, zero otherwise.

Test whether to execute a <tt>single</tt> construct.
There are no implicit barriers in the two "single" calls, rather the compiler should
introduce an explicit barrier if it is required.
*/

kmp_int32
__kmpc_single(ident_t *loc, kmp_int32 global_tid)
{
    KMP_COUNT_BLOCK(OMP_SINGLE);
    // __kmp_enter_single elects exactly one thread (rc == TRUE) to run the block.
    kmp_int32 rc = __kmp_enter_single( global_tid, loc, TRUE );
    if(rc == TRUE) {
        KMP_START_EXPLICIT_TIMER(OMP_single);
    }

#if OMPT_SUPPORT && OMPT_TRACE
    kmp_info_t *this_thr        = __kmp_threads[ global_tid ];
    kmp_team_t *team            = this_thr -> th.th_team;
    int tid = __kmp_tid_from_gtid( global_tid );

    if (ompt_enabled) {
        if (rc) {
            // This thread executes the single block.
            if (ompt_callbacks.ompt_callback(ompt_event_single_in_block_begin)) {
                ompt_callbacks.ompt_callback(ompt_event_single_in_block_begin)(
                    team->t.ompt_team_info.parallel_id,
                    team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id,
                    team->t.ompt_team_info.microtask);
            }
        } else {
            // All other threads skip the block (and may wait at a compiler-emitted barrier).
            if (ompt_callbacks.ompt_callback(ompt_event_single_others_begin)) {
                ompt_callbacks.ompt_callback(ompt_event_single_others_begin)(
                    team->t.ompt_team_info.parallel_id,
                    team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
            }
            this_thr->th.ompt_thread_info.state = ompt_state_wait_single;
        }
    }
#endif

    return rc;
}

/*!
@ingroup WORK_SHARING
@param loc  source location information
@param global_tid  global thread number

Mark the end of a <tt>single</tt> construct.  This function should
only be called by the thread that executed the block of code protected
by the `single` construct.
*/
void
__kmpc_end_single(ident_t *loc, kmp_int32 global_tid)
{
    __kmp_exit_single( global_tid );
    KMP_STOP_EXPLICIT_TIMER(OMP_single);

#if OMPT_SUPPORT && OMPT_TRACE
    kmp_info_t *this_thr        = __kmp_threads[ global_tid ];
    kmp_team_t *team            = this_thr -> th.th_team;
    int tid = __kmp_tid_from_gtid( global_tid );

    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_single_in_block_end)) {
        ompt_callbacks.ompt_callback(ompt_event_single_in_block_end)(
            team->t.ompt_team_info.parallel_id,
            team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
    }
#endif
}

/*!
@ingroup WORK_SHARING
@param loc Source location
@param global_tid Global thread id

Mark the end of a statically scheduled loop.
*/
void
__kmpc_for_static_fini( ident_t *loc, kmp_int32 global_tid )
{
    KE_TRACE( 10, ("__kmpc_for_static_fini called T#%d\n", global_tid));

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_loop_end)) {
        kmp_info_t *this_thr        = __kmp_threads[ global_tid ];
        kmp_team_t *team            = this_thr -> th.th_team;
        int tid = __kmp_tid_from_gtid( global_tid );

        ompt_callbacks.ompt_callback(ompt_event_loop_end)(
            team->t.ompt_team_info.parallel_id,
            team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
    }
#endif

    if ( __kmp_env_consistency_check )
        __kmp_pop_workshare( global_tid, ct_pdo, loc );
}

/*
 * User routines which take C-style arguments (call by value)
 * different from the Fortran equivalent routines
 */

// C entry point for omp_set_num_threads.
void
ompc_set_num_threads( int arg )
{
// !!!!! TODO: check the per-task binding
    __kmp_set_num_threads( arg, __kmp_entry_gtid() );
}

// C entry point for omp_set_dynamic: record the flag in this thread's
// saved internal controls.
void
ompc_set_dynamic( int flag )
{
    kmp_info_t *thread;

    /* For the thread-private implementation of the internal controls */
    thread = __kmp_entry_thread();

    __kmp_save_internal_controls( thread );

    set__dynamic( thread, flag ? TRUE : FALSE );
}

// C entry point for omp_set_nested: record the flag in this thread's
// saved internal controls.
void
ompc_set_nested( int flag )
{
    kmp_info_t *thread;

    /* For the thread-private internal controls implementation */
    thread = __kmp_entry_thread();

    __kmp_save_internal_controls( thread );

    set__nested( thread, flag ? TRUE : FALSE );
}

// C entry point for omp_set_max_active_levels.
void
ompc_set_max_active_levels( int max_active_levels )
{
    /* TO DO */
    /* we want per-task implementation of this internal control */

    /* For the per-thread internal controls implementation */
    __kmp_set_max_active_levels( __kmp_entry_gtid(), max_active_levels );
}

// C entry point for omp_set_schedule.
void
ompc_set_schedule( omp_sched_t kind, int modifier )
{
// !!!!!
// TODO: check the per-task binding
    __kmp_set_schedule( __kmp_entry_gtid(), ( kmp_sched_t ) kind, modifier );
}

// C entry point for omp_get_ancestor_thread_num.
int
ompc_get_ancestor_thread_num( int level )
{
    return __kmp_get_ancestor_thread_num( __kmp_entry_gtid(), level );
}

// C entry point for omp_get_team_size.
int
ompc_get_team_size( int level )
{
    return __kmp_get_team_size( __kmp_entry_gtid(), level );
}

// kmp_set_stacksize extension entry point (int argument).
void
kmpc_set_stacksize( int arg )
{
    // __kmp_aux_set_stacksize initializes the library if needed
    __kmp_aux_set_stacksize( arg );
}

// kmp_set_stacksize_s extension entry point (size_t argument).
void
kmpc_set_stacksize_s( size_t arg )
{
    // __kmp_aux_set_stacksize initializes the library if needed
    __kmp_aux_set_stacksize( arg );
}

// kmp_set_blocktime extension entry point: applies to the calling thread only.
void
kmpc_set_blocktime( int arg )
{
    int gtid, tid;
    kmp_info_t *thread;

    gtid = __kmp_entry_gtid();
    tid = __kmp_tid_from_gtid(gtid);
    thread = __kmp_thread_from_gtid(gtid);

    __kmp_aux_set_blocktime( arg, thread, tid );
}

// kmp_set_library extension entry point.
void
kmpc_set_library( int arg )
{
    // __kmp_user_set_library initializes the library if needed
    __kmp_user_set_library( (enum library_type)arg );
}

// kmp_set_defaults extension entry point: parse a settings string.
void
kmpc_set_defaults( char const * str )
{
    // __kmp_aux_set_defaults initializes the library if needed
    __kmp_aux_set_defaults( str, KMP_STRLEN( str ) );
}

// Add 'proc' to the affinity mask; returns -1 when affinity is unsupported/stubbed.
int
kmpc_set_affinity_mask_proc( int proc, void **mask )
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
    return -1;
#else
    if ( ! TCR_4(__kmp_init_middle) ) {
        __kmp_middle_initialize();
    }
    return __kmp_aux_set_affinity_mask_proc( proc, mask );
#endif
}

// Remove 'proc' from the affinity mask; returns -1 when affinity is unsupported/stubbed.
int
kmpc_unset_affinity_mask_proc( int proc, void **mask )
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
    return -1;
#else
    if ( ! TCR_4(__kmp_init_middle) ) {
        __kmp_middle_initialize();
    }
    return __kmp_aux_unset_affinity_mask_proc( proc, mask );
#endif
}

// Query membership of 'proc' in the affinity mask; returns -1 when unsupported/stubbed.
int
kmpc_get_affinity_mask_proc( int proc, void **mask )
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
    return -1;
#else
    if ( ! TCR_4(__kmp_init_middle) ) {
        __kmp_middle_initialize();
    }
    return __kmp_aux_get_affinity_mask_proc( proc, mask );
#endif
}

/* -------------------------------------------------------------------------- */
/*!
@ingroup THREADPRIVATE
@param loc       source location information
@param gtid      global thread number
@param cpy_size  size of the cpy_data buffer
@param cpy_data  pointer to data to be copied
@param cpy_func  helper function to call for copying data
@param didit     flag variable: 1=single thread; 0=not single thread

__kmpc_copyprivate implements the interface for the private data broadcast needed for
the copyprivate clause associated with a single region in an OpenMP<sup>*</sup> program (both C and Fortran).
All threads participating in the parallel region call this routine.
One of the threads (called the single thread) should have the <tt>didit</tt> variable set to 1
and all other threads should have that variable set to 0.
All threads pass a pointer to a data buffer (cpy_data) that they have built.

The OpenMP specification forbids the use of nowait on the single region when a copyprivate
clause is present. However, @ref __kmpc_copyprivate implements a barrier internally to avoid
race conditions, so the code generation for the single region should avoid generating a barrier
after the call to @ref __kmpc_copyprivate.

The <tt>gtid</tt> parameter is the global thread id for the current thread.
The <tt>loc</tt> parameter is a pointer to source location information.

Internal implementation: The single thread will first copy its descriptor address (cpy_data)
to a team-private location, then the other threads will each call the function pointed to by
the parameter cpy_func, which carries out the copy by copying the data using the cpy_data buffer.

The cpy_func routine used for the copy and the contents of the data area defined by cpy_data
and cpy_size may be built in any fashion that will allow the copy to be done. For instance,
the cpy_data buffer can hold the actual data to be copied or it may hold a list of pointers
to the data. The cpy_func routine must interpret the cpy_data buffer appropriately.

The interface to cpy_func is as follows:
@code
void cpy_func( void *destination, void *source )
@endcode
where void *destination is the cpy_data pointer for the thread being copied to
and void *source is the cpy_data pointer for the thread being copied from.
*/
void
__kmpc_copyprivate( ident_t *loc, kmp_int32 gtid, size_t cpy_size, void *cpy_data, void(*cpy_func)(void*,void*), kmp_int32 didit )
{
    void **data_ptr;

    KC_TRACE( 10, ("__kmpc_copyprivate: called T#%d\n", gtid ));

    KMP_MB();

    // Team-private slot used to publish the single thread's cpy_data pointer.
    data_ptr = & __kmp_team_from_gtid( gtid )->t.t_copypriv_data;

    if ( __kmp_env_consistency_check ) {
        if ( loc == 0 ) {
            KMP_WARNING( ConstructIdentInvalid );
        }
    }

    /* ToDo: Optimize the following two barriers into some kind of split barrier */

    if (didit) *data_ptr = cpy_data;

    /* This barrier is not a barrier region boundary */
#if USE_ITT_NOTIFY
    __kmp_threads[gtid]->th.th_ident = loc;
#endif
    __kmp_barrier( bs_plain_barrier, gtid, FALSE , 0, NULL, NULL );

    // After the barrier every non-single thread copies from the published pointer.
    if (! didit) (*cpy_func)( cpy_data, *data_ptr );

    /* Consider next barrier the user-visible barrier for barrier region boundaries */
    /* Nesting checks are already handled by the single construct checks */

#if USE_ITT_NOTIFY
    __kmp_threads[gtid]->th.th_ident = loc; // TODO: check if it is needed (e.g.
// tasks can overwrite the location)
#endif
    __kmp_barrier( bs_plain_barrier, gtid, FALSE , 0, NULL, NULL );
}

/* -------------------------------------------------------------------------- */

// Shorthand for the "with checks" user-lock primitives used by the
// __kmpc_*_lock entry points below.
#define INIT_LOCK                 __kmp_init_user_lock_with_checks
#define INIT_NESTED_LOCK          __kmp_init_nested_user_lock_with_checks
#define ACQUIRE_LOCK              __kmp_acquire_user_lock_with_checks
#define ACQUIRE_LOCK_TIMED        __kmp_acquire_user_lock_with_checks_timed
#define ACQUIRE_NESTED_LOCK       __kmp_acquire_nested_user_lock_with_checks
#define ACQUIRE_NESTED_LOCK_TIMED __kmp_acquire_nested_user_lock_with_checks_timed
#define RELEASE_LOCK              __kmp_release_user_lock_with_checks
#define RELEASE_NESTED_LOCK       __kmp_release_nested_user_lock_with_checks
#define TEST_LOCK                 __kmp_test_user_lock_with_checks
#define TEST_NESTED_LOCK          __kmp_test_nested_user_lock_with_checks
#define DESTROY_LOCK              __kmp_destroy_user_lock_with_checks
#define DESTROY_NESTED_LOCK       __kmp_destroy_nested_user_lock_with_checks

/*
 * TODO: Make check abort messages use location info & pass it
 * into with_checks routines
 */

#if KMP_USE_DYNAMIC_LOCK

// internal lock initializer: pick direct vs. indirect initialization by sequence.
static __forceinline void
__kmp_init_lock_with_hint(ident_t *loc, void **lock, kmp_dyna_lockseq_t seq)
{
    if (KMP_IS_D_LOCK(seq)) {
        KMP_INIT_D_LOCK(lock, seq);
#if USE_ITT_BUILD
        __kmp_itt_lock_creating((kmp_user_lock_p)lock, NULL);
#endif
    } else {
        KMP_INIT_I_LOCK(lock, seq);
#if USE_ITT_BUILD
        kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(lock);
        __kmp_itt_lock_creating(ilk->lock, loc);
#endif
    }
}

// internal nest lock initializer: map a plain sequence to its nested
// counterpart (nested locks are always indirect).
static __forceinline void
__kmp_init_nest_lock_with_hint(ident_t *loc, void **lock, kmp_dyna_lockseq_t seq)
{
#if KMP_USE_TSX
    // Don't have nested lock implementation for speculative locks
    if (seq == lockseq_hle || seq == lockseq_rtm || seq == lockseq_adaptive)
        seq = __kmp_user_lock_seq;
#endif
    switch (seq) {
        case lockseq_tas:
            seq = lockseq_nested_tas;
            break;
#if KMP_USE_FUTEX
        case lockseq_futex:
            seq = lockseq_nested_futex;
            break;
#endif
        case lockseq_ticket:
            seq = lockseq_nested_ticket;
            break;
        case lockseq_queuing:
            seq = lockseq_nested_queuing;
            break;
        case lockseq_drdpa:
            seq = lockseq_nested_drdpa;
            break;
        default:
            seq = lockseq_nested_queuing;
    }
    KMP_INIT_I_LOCK(lock, seq);
#if USE_ITT_BUILD
    kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(lock);
    __kmp_itt_lock_creating(ilk->lock, loc);
#endif
}

/* initialize the lock with a hint */
void
__kmpc_init_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock, uintptr_t hint)
{
    KMP_DEBUG_ASSERT(__kmp_init_serial);
    if (__kmp_env_consistency_check && user_lock == NULL) {
        KMP_FATAL(LockIsUninitialized, "omp_init_lock_with_hint");
    }

    __kmp_init_lock_with_hint(loc, user_lock, __kmp_map_hint_to_lock(hint));
}

/* initialize the nested lock with a hint */
void
__kmpc_init_nest_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock, uintptr_t hint)
{
    KMP_DEBUG_ASSERT(__kmp_init_serial);
    if (__kmp_env_consistency_check && user_lock == NULL) {
        KMP_FATAL(LockIsUninitialized, "omp_init_nest_lock_with_hint");
    }

    __kmp_init_nest_lock_with_hint(loc, user_lock, __kmp_map_hint_to_lock(hint));
}

#endif // KMP_USE_DYNAMIC_LOCK

/* initialize the lock */
void
__kmpc_init_lock( ident_t * loc, kmp_int32 gtid,  void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
    KMP_DEBUG_ASSERT(__kmp_init_serial);
    if (__kmp_env_consistency_check && user_lock == NULL) {
        KMP_FATAL(LockIsUninitialized, "omp_init_lock");
    }
    __kmp_init_lock_with_hint(loc, user_lock, __kmp_user_lock_seq);

#else // KMP_USE_DYNAMIC_LOCK

    static char const * const func = "omp_init_lock";
    kmp_user_lock_p lck;
    KMP_DEBUG_ASSERT( __kmp_init_serial );

    if ( __kmp_env_consistency_check ) {
        if ( user_lock == NULL ) {
            KMP_FATAL( LockIsUninitialized, func );
        }
    }

    KMP_CHECK_USER_LOCK_INIT();

    // If the lock object fits inside the user's omp_lock_t storage, use that
    // storage directly; otherwise allocate an external lock object.
    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_user_lock_allocate( user_lock, gtid, 0 );
    }

    INIT_LOCK( lck );
    __kmp_set_user_lock_location( lck, loc );

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_init_lock)) {
        ompt_callbacks.ompt_callback(ompt_event_init_lock)((uint64_t) lck);
    }
#endif

#if USE_ITT_BUILD
    __kmp_itt_lock_creating( lck );
#endif /* USE_ITT_BUILD */

#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_init_lock

/* initialize the nested lock */
void
__kmpc_init_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
    KMP_DEBUG_ASSERT(__kmp_init_serial);
    if (__kmp_env_consistency_check && user_lock == NULL) {
        KMP_FATAL(LockIsUninitialized, "omp_init_nest_lock");
    }
    __kmp_init_nest_lock_with_hint(loc, user_lock, __kmp_user_lock_seq);

#else // KMP_USE_DYNAMIC_LOCK

    static char const * const func = "omp_init_nest_lock";
    kmp_user_lock_p lck;
    KMP_DEBUG_ASSERT( __kmp_init_serial );

    if ( __kmp_env_consistency_check ) {
        if ( user_lock == NULL ) {
            KMP_FATAL( LockIsUninitialized, func );
        }
    }

    KMP_CHECK_USER_LOCK_INIT();

    // Nested locks additionally store a depth counter, so the size check
    // includes depth_locked.
    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_user_lock_allocate( user_lock, gtid, 0 );
    }

    INIT_NESTED_LOCK( lck );
    __kmp_set_user_lock_location( lck, loc );

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_init_nest_lock)) {
        ompt_callbacks.ompt_callback(ompt_event_init_nest_lock)((uint64_t) lck);
    }
#endif

#if USE_ITT_BUILD
    __kmp_itt_lock_creating( lck );
#endif /* USE_ITT_BUILD */

#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_init_nest_lock

/* destroy the lock */
void
__kmpc_destroy_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK

# if USE_ITT_BUILD
    kmp_user_lock_p lck;
    if (KMP_EXTRACT_D_TAG(user_lock) == 0) {
        lck = ((kmp_indirect_lock_t *)KMP_LOOKUP_I_LOCK(user_lock))->lock;
    } else {
        lck = (kmp_user_lock_p)user_lock;
    }
    __kmp_itt_lock_destroyed(lck);
# endif
    KMP_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);
#else
    kmp_user_lock_p lck;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_destroy_lock" );
    }

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_destroy_lock)) {
        ompt_callbacks.ompt_callback(ompt_event_destroy_lock)((uint64_t) lck);
    }
#endif

#if USE_ITT_BUILD
    __kmp_itt_lock_destroyed( lck );
#endif /* USE_ITT_BUILD */
    DESTROY_LOCK( lck );

    // Only externally allocated lock objects need to be freed; locks stored
    // inline in the user's omp_lock_t do not.
    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        ;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        ;
    }
#endif
    else {
        __kmp_user_lock_free( user_lock, gtid, lck );
    }
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_destroy_lock

/* destroy the lock */
void
__kmpc_destroy_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK

# if USE_ITT_BUILD
    kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(user_lock);
    __kmp_itt_lock_destroyed(ilk->lock);
# endif
    KMP_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);

#else // KMP_USE_DYNAMIC_LOCK

    kmp_user_lock_p lck;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_destroy_nest_lock" );
    }

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_destroy_nest_lock)) {
        ompt_callbacks.ompt_callback(ompt_event_destroy_nest_lock)((uint64_t) lck);
    }
#endif

#if USE_ITT_BUILD
    __kmp_itt_lock_destroyed( lck );
#endif /* USE_ITT_BUILD */

    DESTROY_NESTED_LOCK( lck );

    // Free only externally allocated lock objects (see __kmpc_destroy_lock).
    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        ;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        ;
    }
#endif
    else {
        __kmp_user_lock_free( user_lock, gtid, lck );
    }
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_destroy_nest_lock

/* acquire the lock (omp_set_lock entry point) */
void
__kmpc_set_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
    KMP_COUNT_BLOCK(OMP_set_lock);
#if KMP_USE_DYNAMIC_LOCK
    int tag = KMP_EXTRACT_D_TAG(user_lock);
# if USE_ITT_BUILD
    __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock); // itt function will get to the right lock object.
# endif
# if KMP_USE_INLINED_TAS
    if (tag == locktag_tas && !__kmp_env_consistency_check) {
        KMP_ACQUIRE_TAS_LOCK(user_lock, gtid);
    } else
# elif KMP_USE_INLINED_FUTEX
    if (tag == locktag_futex && !__kmp_env_consistency_check) {
        KMP_ACQUIRE_FUTEX_LOCK(user_lock, gtid);
    } else
# endif
    {
        __kmp_direct_set[tag]((kmp_dyna_lock_t *)user_lock, gtid);
    }
# if USE_ITT_BUILD
    __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
# endif

#else // KMP_USE_DYNAMIC_LOCK

    kmp_user_lock_p lck;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_set_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */

    ACQUIRE_LOCK( lck, gtid );

#if USE_ITT_BUILD
    __kmp_itt_lock_acquired( lck );
#endif /* USE_ITT_BUILD */

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_acquired_lock)) {
        ompt_callbacks.ompt_callback(ompt_event_acquired_lock)((uint64_t) lck);
    }
#endif

#endif // KMP_USE_DYNAMIC_LOCK
}

/* acquire the nested lock (omp_set_nest_lock entry point) */
void
__kmpc_set_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK

# if USE_ITT_BUILD
    __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
# endif
    KMP_D_LOCK_FUNC(user_lock, set)((kmp_dyna_lock_t *)user_lock, gtid);
# if USE_ITT_BUILD
    __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
#endif

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled) {
        // missing support here: need to know whether acquired first or not
    }
#endif

#else // KMP_USE_DYNAMIC_LOCK
    int acquire_status;
    kmp_user_lock_p lck;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_set_nest_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */

    ACQUIRE_NESTED_LOCK( lck, gtid, &acquire_status );

#if USE_ITT_BUILD
    __kmp_itt_lock_acquired( lck );
#endif /* USE_ITT_BUILD */

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled) {
        // Distinguish first acquisition from a nested re-acquisition.
        if (acquire_status == KMP_LOCK_ACQUIRED_FIRST) {
            if(ompt_callbacks.ompt_callback(ompt_event_acquired_nest_lock_first))
                ompt_callbacks.ompt_callback(ompt_event_acquired_nest_lock_first)((uint64_t) lck);
        } else {
            if(ompt_callbacks.ompt_callback(ompt_event_acquired_nest_lock_next))
                ompt_callbacks.ompt_callback(ompt_event_acquired_nest_lock_next)((uint64_t) lck);
        }
    }
#endif

#endif // KMP_USE_DYNAMIC_LOCK
}

/* release the lock (omp_unset_lock entry point) */
void
__kmpc_unset_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
#if KMP_USE_DYNAMIC_LOCK

    int tag = KMP_EXTRACT_D_TAG(user_lock);
# if USE_ITT_BUILD
    __kmp_itt_lock_releasing((kmp_user_lock_p)user_lock);
# endif
# if KMP_USE_INLINED_TAS
    if (tag == locktag_tas && !__kmp_env_consistency_check) {
        KMP_RELEASE_TAS_LOCK(user_lock, gtid);
    } else
# elif KMP_USE_INLINED_FUTEX
    if (tag == locktag_futex && !__kmp_env_consistency_check) {
        KMP_RELEASE_FUTEX_LOCK(user_lock, gtid);
    } else
# endif
    {
        __kmp_direct_unset[tag]((kmp_dyna_lock_t *)user_lock, gtid);
    }

#else // KMP_USE_DYNAMIC_LOCK

    kmp_user_lock_p lck;

    /* Can't use serial interval since not block structured */
    /* release the lock */

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
        // "fast" path implemented to fix customer performance issue
        // (release the inline TAS lock directly and return early)
#if USE_ITT_BUILD
        __kmp_itt_lock_releasing( (kmp_user_lock_p)user_lock );
#endif /* USE_ITT_BUILD */
        TCW_4(((kmp_user_lock_p)user_lock)->tas.lk.poll, 0);
        KMP_MB();
        return;
#else
        lck = (kmp_user_lock_p)user_lock;
#endif
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_unset_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_releasing( lck );
#endif /* USE_ITT_BUILD */

    RELEASE_LOCK( lck, gtid );

#if OMPT_SUPPORT && OMPT_BLAME
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_release_lock)) {
        ompt_callbacks.ompt_callback(ompt_event_release_lock)((uint64_t) lck);
    }
#endif

#endif // KMP_USE_DYNAMIC_LOCK
}

/* release the lock */
void
__kmpc_unset_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
#if KMP_USE_DYNAMIC_LOCK

# if USE_ITT_BUILD
    __kmp_itt_lock_releasing((kmp_user_lock_p)user_lock);
# endif
    KMP_D_LOCK_FUNC(user_lock, unset)((kmp_dyna_lock_t *)user_lock, gtid);

#else // KMP_USE_DYNAMIC_LOCK

    kmp_user_lock_p lck;

    /* Can't use serial interval since not block structured */

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
        // "fast" path implemented to fix customer performance issue
        // (decrement nesting depth inline; fully release at depth zero)
        kmp_tas_lock_t *tl = (kmp_tas_lock_t*)user_lock;
#if USE_ITT_BUILD
        __kmp_itt_lock_releasing( (kmp_user_lock_p)user_lock );
#endif /* USE_ITT_BUILD */
        if ( --(tl->lk.depth_locked) == 0 ) {
            TCW_4(tl->lk.poll, 0);
        }
        KMP_MB();
        return;
#else
        lck = (kmp_user_lock_p)user_lock;
#endif
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_unset_nest_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_releasing( lck );
#endif /* USE_ITT_BUILD */

    int release_status;
    release_status = RELEASE_NESTED_LOCK( lck, gtid );
#if OMPT_SUPPORT && OMPT_BLAME
    if (ompt_enabled) {
        if (release_status == KMP_LOCK_RELEASED) {
            if (ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_last)) {
                ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_last)(
                    (uint64_t) lck);
            }
        } else if (ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_prev)) {
            ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_prev)(
                (uint64_t) lck);
        }
    }
#endif

#endif // KMP_USE_DYNAMIC_LOCK
}

/* try to acquire the lock */
int
__kmpc_test_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
    KMP_COUNT_BLOCK(OMP_test_lock);

#if KMP_USE_DYNAMIC_LOCK
    int rc;
    int tag = KMP_EXTRACT_D_TAG(user_lock);
# if USE_ITT_BUILD
    __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
# endif
# if KMP_USE_INLINED_TAS
    if (tag == locktag_tas && !__kmp_env_consistency_check) {
        KMP_TEST_TAS_LOCK(user_lock, gtid, rc);
    } else
# elif KMP_USE_INLINED_FUTEX
    if (tag == locktag_futex && !__kmp_env_consistency_check) {
        KMP_TEST_FUTEX_LOCK(user_lock, gtid, rc);
    } else
# endif
    {
        rc = __kmp_direct_test[tag]((kmp_dyna_lock_t *)user_lock, gtid);
    }
    if (rc) {
# if USE_ITT_BUILD
        __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
# endif
        return FTN_TRUE;
    } else {
# if USE_ITT_BUILD
        __kmp_itt_lock_cancelled((kmp_user_lock_p)user_lock);
# endif
        return FTN_FALSE;
    }

#else // KMP_USE_DYNAMIC_LOCK

    kmp_user_lock_p lck;
    int          rc;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_test_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */

    rc = TEST_LOCK( lck, gtid );
#if USE_ITT_BUILD
    if ( rc ) {
        __kmp_itt_lock_acquired( lck );
    } else {
        __kmp_itt_lock_cancelled( lck );
    }
#endif /* USE_ITT_BUILD */
    return ( rc ? FTN_TRUE : FTN_FALSE );

    /* Can't use serial interval since not block structured */

#endif // KMP_USE_DYNAMIC_LOCK
}

/* try to acquire the nested lock */
int
__kmpc_test_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
#if KMP_USE_DYNAMIC_LOCK
    int rc;
# if USE_ITT_BUILD
    __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
# endif
    rc = KMP_D_LOCK_FUNC(user_lock, test)((kmp_dyna_lock_t *)user_lock, gtid);
# if USE_ITT_BUILD
    if (rc) {
        __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
    } else {
        __kmp_itt_lock_cancelled((kmp_user_lock_p)user_lock);
    }
# endif
    return rc;

#else // KMP_USE_DYNAMIC_LOCK

    kmp_user_lock_p lck;
    int          rc;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_test_nest_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */

    rc = TEST_NESTED_LOCK( lck, gtid );
#if USE_ITT_BUILD
    if ( rc ) {
        __kmp_itt_lock_acquired( lck );
    } else {
        __kmp_itt_lock_cancelled( lck );
    }
#endif /* USE_ITT_BUILD */
    return rc;

    /* Can't use serial interval since not block structured */

#endif // KMP_USE_DYNAMIC_LOCK
}
/*--------------------------------------------------------------------------------------------------------------------*/

/*
 * Interface to fast scalable reduce methods routines
 */

// keep the selected method in a thread local structure for cross-function usage: will be used in __kmpc_end_reduce* functions;
// another solution: to re-determine the method one more time in __kmpc_end_reduce* functions (new prototype required then)
// AT: which solution is better?
#define __KMP_SET_REDUCTION_METHOD(gtid,rmethod) \
                   ( ( __kmp_threads[ ( gtid ) ] -> th.th_local.packed_reduction_method ) = ( rmethod ) )

#define __KMP_GET_REDUCTION_METHOD(gtid) \
                   ( __kmp_threads[ ( gtid ) ] -> th.th_local.packed_reduction_method )

// description of the packed_reduction_method variable: look at the macros in kmp.h

// used in a critical section reduce block
/* Acquire the per-reduction critical-section lock stored in (or referenced by)
 * "crit"; initializes the lock lazily on first use. */
static __forceinline void
__kmp_enter_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit ) {

    // this lock was visible to a customer and to the threading profile tool as a serial overhead span
    //            (although it's used for an internal purpose only)
    //            why was it visible in previous implementation?
    //            should we keep it visible in new reduce block?
    kmp_user_lock_p lck;

#if KMP_USE_DYNAMIC_LOCK

    kmp_dyna_lock_t *lk = (kmp_dyna_lock_t *)crit;
    // Check if it is initialized.
    if (*lk == 0) {
        if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
            /* Direct lock: the tag is stored right in the critical name word. */
            KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)crit, 0, KMP_GET_D_TAG(__kmp_user_lock_seq));
        } else {
            /* Indirect lock: allocate and stash a pointer in the critical name. */
            __kmp_init_indirect_csptr(crit, loc, global_tid, KMP_GET_I_TAG(__kmp_user_lock_seq));
        }
    }
    // Branch for accessing the actual lock object and set operation. This branching is inevitable since
    // this lock initialization does not follow the normal dispatch path (lock table is not used).
    if (KMP_EXTRACT_D_TAG(lk) != 0) {
        lck = (kmp_user_lock_p)lk;
        KMP_DEBUG_ASSERT(lck != NULL);
        if (__kmp_env_consistency_check) {
            __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
        }
        KMP_D_LOCK_FUNC(lk, set)(lk, global_tid);
    } else {
        kmp_indirect_lock_t *ilk = *((kmp_indirect_lock_t **)lk);
        lck = ilk->lock;
        KMP_DEBUG_ASSERT(lck != NULL);
        if (__kmp_env_consistency_check) {
            __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
        }
        KMP_I_LOCK_FUNC(ilk, set)(lck, global_tid);
    }

#else // KMP_USE_DYNAMIC_LOCK

    // We know that the fast reduction code is only emitted by Intel compilers
    // with 32 byte critical sections. If there isn't enough space, then we
    // have to use a pointer.
    if ( __kmp_base_user_lock_size <= INTEL_CRITICAL_SIZE ) {
        lck = (kmp_user_lock_p)crit;
    }
    else {
        lck = __kmp_get_critical_section_ptr( crit, loc, global_tid );
    }
    KMP_DEBUG_ASSERT( lck != NULL );

    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_critical, loc, lck );

    __kmp_acquire_user_lock_with_checks( lck, global_tid );

#endif // KMP_USE_DYNAMIC_LOCK
}

// used in a critical section reduce block
/* Release the per-reduction critical-section lock taken by
 * __kmp_enter_critical_section_reduce_block(). */
static __forceinline void
__kmp_end_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit ) {

    kmp_user_lock_p lck;

#if KMP_USE_DYNAMIC_LOCK

    if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
        lck = (kmp_user_lock_p)crit;
        if (__kmp_env_consistency_check)
            __kmp_pop_sync(global_tid, ct_critical, loc);
        KMP_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
    } else {
        kmp_indirect_lock_t *ilk = (kmp_indirect_lock_t *)TCR_PTR(*((kmp_indirect_lock_t **)crit));
        if (__kmp_env_consistency_check)
            __kmp_pop_sync(global_tid, ct_critical, loc);
        KMP_I_LOCK_FUNC(ilk, unset)(ilk->lock, global_tid);
    }

#else // KMP_USE_DYNAMIC_LOCK

    // We know that the fast reduction code is only emitted by Intel compilers with 32 byte critical
    // sections. If there isn't enough space, then we have to use a pointer.
    if ( __kmp_base_user_lock_size > 32 ) {
        lck = *( (kmp_user_lock_p *) crit );
        KMP_ASSERT( lck != NULL );
    }
    else {
        lck = (kmp_user_lock_p) crit;
    }

    if ( __kmp_env_consistency_check )
        __kmp_pop_sync( global_tid, ct_critical, loc );

    __kmp_release_user_lock_with_checks( lck, global_tid );

#endif // KMP_USE_DYNAMIC_LOCK
} // __kmp_end_critical_section_reduce_block


/* 2.a.i. Reduce Block without a terminating barrier */
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread number
@param num_vars number of items (variables) to be reduced
@param reduce_size size of data in bytes to be reduced
@param reduce_data pointer to data to be reduced
@param reduce_func callback function providing reduction operation on two operands and returning result of reduction in lhs_data
@param lck pointer to the unique lock data structure
@result 1 for the master thread, 0 for all other team threads, 2 for all team threads if atomic reduction needed

The nowait version is used for a reduce clause with the nowait argument.
*/
kmp_int32
__kmpc_reduce_nowait(
    ident_t *loc, kmp_int32 global_tid,
    kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck ) {

    KMP_COUNT_BLOCK(REDUCE_nowait);
    int retval = 0;
    PACKED_REDUCTION_METHOD_T packed_reduction_method;
#if OMP_40_ENABLED
    kmp_team_t *team;
    kmp_info_t *th;
    int teams_swapped = 0, task_state;
#endif
    KA_TRACE( 10, ( "__kmpc_reduce_nowait() enter: called T#%d\n", global_tid ) );

    // why do we need this initialization here at all?
    // Reduction clause can not be used as a stand-alone directive.

    // do not call __kmp_serial_initialize(), it will be called by __kmp_parallel_initialize() if needed
    // possible detection of false-positive race by the threadchecker ???
    if( ! TCR_4( __kmp_init_parallel ) )
        __kmp_parallel_initialize();

    // check correctness of reduce block nesting
#if KMP_USE_DYNAMIC_LOCK
    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_reduce, loc, NULL, 0 );
#else
    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_reduce, loc, NULL );
#endif

#if OMP_40_ENABLED
    th = __kmp_thread_from_gtid(global_tid);
    if( th->th.th_teams_microtask ) {   // AC: check if we are inside the teams construct?
        team = th->th.th_team;
        if( team->t.t_level == th->th.th_teams_level ) {
            // this is reduction at teams construct
            KMP_DEBUG_ASSERT(!th->th.th_info.ds.ds_tid);  // AC: check that tid == 0
            // Let's swap teams temporarily for the reduction barrier
            teams_swapped = 1;
            th->th.th_info.ds.ds_tid = team->t.t_master_tid;
            th->th.th_team = team->t.t_parent;
            th->th.th_team_nproc = th->th.th_team->t.t_nproc;
            th->th.th_task_team = th->th.th_team->t.t_task_team[0];
            task_state = th->th.th_task_state;
            th->th.th_task_state = 0;
        }
    }
#endif // OMP_40_ENABLED

    // packed_reduction_method value will be reused by __kmp_end_reduce* function, the value should be kept in a variable
    // the variable should be either a construct-specific or thread-specific property, not a team specific property
    //     (a thread can reach the next reduce block on the next construct, reduce method may differ on the next construct)
    // an ident_t "loc" parameter could be used as a construct-specific property (what if loc == 0?)
    //     (if both construct-specific and team-specific variables were shared, then unness extra syncs should be needed)
    // a thread-specific variable is better regarding two issues above (next construct and extra syncs)
    // a thread-specific "th_local.reduction_method" variable is used currently
    // each thread executes 'determine' and 'set' lines (no need to execute by one thread, to avoid unness extra syncs)

    packed_reduction_method = __kmp_determine_reduction_method( loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck );
    __KMP_SET_REDUCTION_METHOD( global_tid, packed_reduction_method );

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_enter_critical_section_reduce_block( loc, global_tid, lck );
        retval = 1;

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( Intel platforms only )
        retval = 1;

    } else if( packed_reduction_method == atomic_reduce_block ) {

        retval = 2;

        // all threads should do this pop here (because __kmpc_end_reduce_nowait() won't be called by the code gen)
        //     (it's not quite good, because the checking block has been closed by this 'pop',
        //      but atomic operation has not been executed yet, will be executed slightly later, literally on next instruction)
        if ( __kmp_env_consistency_check )
            __kmp_pop_sync( global_tid, ct_reduce, loc );

    } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

        //AT: performance issue: a real barrier here
        //AT:     (if master goes slow, other threads are blocked here waiting for the master to come and release them)
        //AT:     (it's not what a customer might expect specifying NOWAIT clause)
        //AT:     (specifying NOWAIT won't result in improvement of performance, it'll be confusing to a customer)
        //AT: another implementation of *barrier_gather*nowait() (or some other design) might go faster
        //        and be more in line with sense of NOWAIT
        //AT: TO DO: do epcc test and compare times

        // this barrier should be invisible to a customer and to the threading profile tool
        //              (it's neither a terminating barrier nor customer's code, it's used for an internal purpose)
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc;
#endif
        retval = __kmp_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid, FALSE, reduce_size, reduce_data, reduce_func );
        retval = ( retval != 0 ) ? ( 0 ) : ( 1 );

        // all other workers except master should do this pop here
        //     ( none of other workers will get to __kmpc_end_reduce_nowait() )
        if ( __kmp_env_consistency_check ) {
            if( retval == 0 ) {
                __kmp_pop_sync( global_tid, ct_reduce, loc );
            }
        }

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }
#if OMP_40_ENABLED
    if( teams_swapped ) {
        // Restore thread structure
        th->th.th_info.ds.ds_tid = 0;
        th->th.th_team = team;
        th->th.th_team_nproc = team->t.t_nproc;
        th->th.th_task_team = team->t.t_task_team[task_state];
        th->th.th_task_state = task_state;
    }
#endif

    KA_TRACE( 10, ( "__kmpc_reduce_nowait() exit: called T#%d: method %08x, returns %08x\n", global_tid, packed_reduction_method, retval ) );

    return retval;
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread id.
@param lck pointer to the unique lock data structure

Finish the execution of a reduce nowait.
*/
void
__kmpc_end_reduce_nowait( ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck ) {

    PACKED_REDUCTION_METHOD_T packed_reduction_method;

    KA_TRACE( 10, ( "__kmpc_end_reduce_nowait() enter: called T#%d\n", global_tid ) );

    /* Reuse the method chosen by the matching __kmpc_reduce_nowait() call. */
    packed_reduction_method = __KMP_GET_REDUCTION_METHOD( global_tid );

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_end_critical_section_reduce_block( loc, global_tid, lck );

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( on Intel platforms only )

    } else if( packed_reduction_method == atomic_reduce_block ) {

        // neither master nor other workers should get here
        //     (code gen does not generate this call in case 2: atomic reduce block)
        // actually it's better to remove this elseif at all;
        // after removal this value will checked by the 'else' and will assert

    } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

        // only master gets here

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }

    if ( __kmp_env_consistency_check )
        __kmp_pop_sync( global_tid, ct_reduce, loc );

    KA_TRACE( 10, ( "__kmpc_end_reduce_nowait() exit: called T#%d: method %08x\n", global_tid, packed_reduction_method ) );

    return;
}

/* 2.a.ii. Reduce Block with a terminating barrier */

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread number
@param num_vars number of items (variables) to be reduced
@param reduce_size size of data in bytes to be reduced
@param reduce_data pointer to data to be reduced
@param reduce_func callback function providing reduction operation on two operands and returning result of reduction in lhs_data
@param lck pointer to the unique lock data structure
@result 1 for the master thread, 0 for all other team threads, 2 for all team threads if atomic reduction needed

A blocking reduce that includes an implicit barrier.
*/
kmp_int32
__kmpc_reduce(
    ident_t *loc, kmp_int32 global_tid,
    kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
    void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck )
{
    KMP_COUNT_BLOCK(REDUCE_wait);
    int retval = 0;
    PACKED_REDUCTION_METHOD_T packed_reduction_method;

    KA_TRACE( 10, ( "__kmpc_reduce() enter: called T#%d\n", global_tid ) );

    // why do we need this initialization here at all?
    // Reduction clause can not be a stand-alone directive.

    // do not call __kmp_serial_initialize(), it will be called by __kmp_parallel_initialize() if needed
    // possible detection of false-positive race by the threadchecker ???
    if( ! TCR_4( __kmp_init_parallel ) )
        __kmp_parallel_initialize();

    // check correctness of reduce block nesting
#if KMP_USE_DYNAMIC_LOCK
    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_reduce, loc, NULL, 0 );
#else
    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_reduce, loc, NULL );
#endif

    packed_reduction_method = __kmp_determine_reduction_method( loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck );
    __KMP_SET_REDUCTION_METHOD( global_tid, packed_reduction_method );

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_enter_critical_section_reduce_block( loc, global_tid, lck );
        retval = 1;

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( Intel platforms only )
        retval = 1;

    } else if( packed_reduction_method == atomic_reduce_block ) {

        retval = 2;

    } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

        //case tree_reduce_block:
        // this barrier should be visible to a customer and to the threading profile tool
        //              (it's a terminating barrier on constructs if NOWAIT not specified)
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc; // needed for correct notification of frames
#endif
        retval = __kmp_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid, TRUE, reduce_size, reduce_data, reduce_func );
        retval = ( retval != 0 ) ? ( 0 ) : ( 1 );

        // all other workers except master should do this pop here
        //     ( none of other workers except master will enter __kmpc_end_reduce() )
        if ( __kmp_env_consistency_check ) {
            if( retval == 0 ) { // 0: all other workers; 1: master
                __kmp_pop_sync( global_tid, ct_reduce, loc );
            }
        }

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }

    KA_TRACE( 10, ( "__kmpc_reduce() exit: called T#%d: method %08x, returns %08x\n", global_tid, packed_reduction_method, retval ) );

    return retval;
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread id.
@param lck pointer to the unique lock data structure

Finish the execution of a blocking reduce.
The <tt>lck</tt> pointer must be the same as that used in the corresponding start function.
*/
void
__kmpc_end_reduce( ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck ) {

    PACKED_REDUCTION_METHOD_T packed_reduction_method;

    KA_TRACE( 10, ( "__kmpc_end_reduce() enter: called T#%d\n", global_tid ) );

    /* Reuse the method chosen by the matching __kmpc_reduce() call. */
    packed_reduction_method = __KMP_GET_REDUCTION_METHOD( global_tid );

    // this barrier should be visible to a customer and to the threading profile tool
    //              (it's a terminating barrier on constructs if NOWAIT not specified)

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_end_critical_section_reduce_block( loc, global_tid, lck );

        // TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc;
#endif
        __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( Intel platforms only )

        // TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc;
#endif
        __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

    } else if( packed_reduction_method == atomic_reduce_block ) {

        // TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc;
#endif
        __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

    } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

        // only master executes here (master releases all other workers)
        __kmp_end_split_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid );

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }

    if ( __kmp_env_consistency_check )
        __kmp_pop_sync( global_tid, ct_reduce, loc );

    KA_TRACE( 10, ( "__kmpc_end_reduce() exit: called T#%d: method %08x\n", global_tid, packed_reduction_method ) );

    return;
}

#undef __KMP_GET_REDUCTION_METHOD
#undef __KMP_SET_REDUCTION_METHOD

/*-- end of interface to fast scalable reduce routines ---------------------------------------------------------------*/

/* Return the id of the task currently executed by the calling thread,
 * or 0 when called from outside an initialized OpenMP thread. */
kmp_uint64
__kmpc_get_taskid() {

    kmp_int32    gtid;
    kmp_info_t * thread;

    gtid = __kmp_get_gtid();
    if ( gtid < 0 ) {
        return 0;
    }; // if
    thread = __kmp_thread_from_gtid( gtid );
    return thread->th.th_current_task->td_task_id;

} // __kmpc_get_taskid

/* Return the id of the parent of the current task, or 0 when there is no
 * parent task or no OpenMP thread context. */
kmp_uint64
__kmpc_get_parent_taskid() {

    kmp_int32        gtid;
    kmp_info_t *     thread;
    kmp_taskdata_t * parent_task;

    gtid = __kmp_get_gtid();
    if ( gtid < 0 ) {
        return 0;
    }; // if
    thread      = __kmp_thread_from_gtid( gtid );
    parent_task = thread->th.th_current_task->td_parent;
    return ( parent_task == NULL ? 0 : parent_task->td_task_id );

} // __kmpc_get_parent_taskid

/* Record thread-placement parameters (socket/core/thread counts and offsets)
 * into the global placement variables, initializing the runtime if needed. */
void __kmpc_place_threads(int nS, int sO, int nC, int cO, int nT)
{
    if ( ! __kmp_init_serial ) {
        __kmp_serial_initialize();
    }
    __kmp_place_num_sockets = nS;
    __kmp_place_socket_offset = sO;
    __kmp_place_num_cores = nC;
    __kmp_place_core_offset = cO;
    __kmp_place_num_threads_per_core = nT;
}

// end of file //
ForEach.h
#pragma once #ifdef OPENMP_INCLUDED #include <template_tensors/util/Memory.h> #include <template_tensors/cuda/Cuda.h> #include <template_tensors/for_each/Helper.h> #include <omp.h> namespace openmp { struct ForEach { template <bool TIsOnHost, metal::int_ TNum, mem::MemoryType TMemoryType> TVALUE(for_each::Availability, availability_v, (TIsOnHost && (mem::isOnHost<TMemoryType, TIsOnHost>() || TMemoryType == mem::UNKNOWN)) ? for_each::YES : for_each::NO) template <bool TIsOnHost, metal::int_ TNum, mem::MemoryType TMemoryType> TVALUE(bool, is_parallel_v, true) template <metal::int_ TNum = for_each::DYN, mem::MemoryType TMemoryType = mem::UNKNOWN, bool TMustBeAvailable = true, typename TIteratorBegin, typename TIteratorEnd, typename TFunctor> __host__ static bool for_each(TIteratorBegin begin, TIteratorEnd end, TFunctor func) { #pragma omp parallel for for (TIteratorBegin it = begin; it < end; ++it) { func(*it); } return true; } }; } // end of ns openmp #endif
aliLfold.c
/* Last changed Time-stamp: <2006-03-02 22:32:02 ivo> */
/*
   minimum free energy consensus
   RNA secondary structure prediction
   with maximum distance base pairs

   c Ivo Hofacker, Stephan Bernhart

   Vienna RNA package
*/

#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctype.h>
#include <string.h>
#include "utils.h"
#include "energy_par.h"
#include "fold_vars.h"
#include "pair_mat.h"
#include "params.h"
#include "ribo.h"
#include "alifold.h"
#include "fold.h"
#include "loop_energies.h"

#ifdef _OPENMP
#include <omp.h>
#endif

/*@unused@*/
static char rcsid[] UNUSED = "$Id: aliLfold.c,v 1.1 2007/06/23 08:49:57 ivo Exp $";

#define PAREN
#define STACK_BULGE1      1   /* stacking energies for bulges of size 1 */
#define NEW_NINIO         1   /* new asymetry penalty */
#define MAXSECTORS        500 /* dimension for a backtrack array */
#define LOCALITY          0.  /* locality parameter for base-pairs */
#define UNIT              100
#define MINPSCORE         -2 * UNIT
#define NONE              -10000 /* score for forbidden pairs */

/*
#################################
# GLOBAL VARIABLES              #
#################################
*/

/*
#################################
# PRIVATE VARIABLES             #
#################################
*/
PRIVATE paramT        *P = NULL;
PRIVATE int           **c = NULL;       /* energy array, given that i-j pair */
PRIVATE int           *cc = NULL;       /* linear array for calculating canonical structures */
PRIVATE int           *cc1 = NULL;      /*   "     "        */
PRIVATE int           *f3 = NULL;       /* energy of 5' end */
PRIVATE int           **fML = NULL;     /* multi-loop auxiliary energy array */
PRIVATE int           *Fmi = NULL;      /* holds row i of fML (avoids jumps in memory) */
PRIVATE int           *DMLi = NULL;     /* DMLi[j] holds MIN(fML[i,k]+fML[k+1,j]) */
PRIVATE int           *DMLi1 = NULL;    /*             MIN(fML[i+1,k]+fML[k+1,j]) */
PRIVATE int           *DMLi2 = NULL;    /*             MIN(fML[i+2,k]+fML[k+1,j]) */
PRIVATE int           **pscore = NULL;  /* precomputed array of pair types */
PRIVATE unsigned int  length;
PRIVATE short         **S = NULL;       /* encoded sequences of the alignment */
PRIVATE short         **S5 = NULL;      /*S5[s][i] holds next base 5' of i in sequence s*/
PRIVATE short         **S3 = NULL;      /*Sl[s][i] holds next base 3' of i in sequence s*/
PRIVATE char          **Ss = NULL;
PRIVATE unsigned short  **a2s = NULL;   /* alignment-column to sequence-position map */
PRIVATE float         **dm = NULL;      /* pair-distance (ribosum or hamming) matrix */
PRIVATE int olddm[7][7]={{0,0,0,0,0,0,0}, /* hamming distance between pairs PRIVATE needed??*/
                         {0,0,2,2,1,2,2} /* CG */,
                         {0,2,0,1,2,2,2} /* GC */,
                         {0,2,1,0,2,1,2} /* GU */,
                         {0,1,2,2,0,2,1} /* UG */,
                         {0,2,2,1,2,0,2} /* AU */,
                         {0,2,2,2,1,2,0} /* UA */};
PRIVATE int energyout;
PRIVATE int energyprev;

#ifdef _OPENMP

/* NOTE: all variables are assumed to be uninitialized if they are declared as threadprivate
*/
#pragma omp threadprivate(P, c, cc, cc1, f3, fML, Fmi, DMLi, DMLi1, DMLi2, pscore, length, S, dm, S5, S3, Ss, a2s, energyout, energyprev)

#endif

/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
PRIVATE void  initialize_aliLfold(int length, int maxdist);
PRIVATE void  free_aliL_arrays(int maxdist);
PRIVATE void  get_arrays(unsigned int size, int maxdist);
PRIVATE short *encode_seq(const char *sequence, short *s5, short *s3, char *ss, unsigned short *as);
PRIVATE void  make_pscores(const char ** AS, const char *structure,int maxdist, int start);
PRIVATE int   fill_arrays(const char **strings, int maxdist, char *structure);
PRIVATE char  *backtrack(const char **strings, int start, int maxdist);

/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/

/* Allocate the DP arrays, set up the pair matrix and energy parameters. */
PRIVATE void initialize_aliLfold(int length, int maxdist){
  if (length<1) nrerror("initialize_fold: argument must be greater 0");
  get_arrays((unsigned) length, maxdist);
  make_pair_matrix();
  if(P) free(P);
  P = scale_parameters();
}

/*--------------------------------------------------------------------------*/

/* Allocate the sliding-window DP matrices; row width is bounded by maxdist. */
PRIVATE void get_arrays(unsigned int size, int maxdist)
{
  int i;
  c      = (int **)space(sizeof(int *)*(size+1));
  fML    = (int **)space(sizeof(int *)*(size+1));
  pscore = (int **)space(sizeof(int *)*(size+1));
  f3     = (int *) space(sizeof(int)*(size+2));  /* has to be one longer */
  cc     = (int *) space(sizeof(int)*(maxdist+5));
  cc1    = (int *) space(sizeof(int)*(maxdist+5));
  Fmi    = (int *) space(sizeof(int)*(maxdist+5));
  DMLi   = (int *) space(sizeof(int)*(maxdist+5));
  DMLi1  = (int *) space(sizeof(int)*(maxdist+5));
  DMLi2  = (int *) space(sizeof(int)*(maxdist+5));
  /* only the last maxdist+5 rows are allocated up front; the rest are
     managed as the window slides (rows rotated elsewhere). */
  for (i=size; i>(int)size-maxdist-5 && i>=0; i--) {
    c[i] = (int *) space(sizeof(int) *(maxdist+5));
    fML[i] = (int *) space(sizeof(int) *(maxdist+5));
    pscore[i] = (int *) space(sizeof(int )*(maxdist+5));
  }
}

/*--------------------------------------------------------------------------*/

/* Release everything allocated by get_arrays(). */
PRIVATE void free_aliL_arrays(int maxdist) {
  int i;
  for(i=0; i<maxdist+5 && i<=length; i++){
    free(c[i]); free(fML[i]); free(pscore[i]);
  }
  free(c); free(fML); free(f3); free(cc); free(cc1);
  free(pscore);
  free(Fmi); free(DMLi); free(DMLi1); free(DMLi2);
}

/*--------------------------------------------------------------------------*/

/* Public entry point: compute the local consensus MFE structure of the
 * alignment "strings" with base-pair span limited to maxdist.
 * Returns the optimal energy in kcal/mol (internal units are 1/100). */
PUBLIC float aliLfold(const char **strings, char *structure, int maxdist) {
  int length, energy, s, n_seq, i, j;
  length = (int) strlen(strings[0]);
  if (maxdist>length) maxdist = length;
  initialize_aliLfold(length, maxdist);
  for (s=0; strings[s]!=NULL; s++);
  n_seq = s;
  S   = (short **) space(n_seq*sizeof(short *));
  S5  = (short **) space(n_seq*sizeof(short *));
  S3  = (short **) space(n_seq*sizeof(short *));
  a2s = (unsigned short **) space(n_seq*sizeof(unsigned short *));
  Ss  = (char **) space(n_seq*sizeof(char *));
  for (s=0; s<n_seq; s++) {
    if (strlen(strings[s]) != length) nrerror("uneqal seqence lengths");
    S5[s]  = (short *) space((length+2)*sizeof(short));
    S3[s]  = (short *) space((length+2)*sizeof(short));
    a2s[s] = (unsigned short *) space((length+2)*sizeof(unsigned short));
    Ss[s]  = (char *) space((length+2)*sizeof(char));
    S[s]   = encode_seq(strings[s], S5[s],S3[s],Ss[s],a2s[s]);
  }
  /* choose the pair-distance matrix: ribosum scores or the fixed table */
  if (ribo) {
    if (RibosumFile !=NULL) dm=readribosum(RibosumFile);
    else dm=get_ribosum(strings, n_seq, S[0][0]);
  }
  else { /*use usual matrix*/
    dm=(float **)space(7*sizeof(float*));
    for (i=0; i<7;i++) {
      dm[i]=(float *)space(7*sizeof(float));
      for (j=0; j<7; j++)
        dm[i][j] = (float) olddm[i][j];
    }
  }
  /* precompute covariance scores for the initial window */
  for (i=length; i>=(int)length-(int)maxdist-4 && i>0; i--)
    make_pscores((const char **) strings,structure,maxdist,i);
  energy = fill_arrays(strings, maxdist, structure);
  free_aliL_arrays(maxdist);
  return (float) energy/100.;
}

/* Core sliding-window DP: fills "c", "fML" and "f3" and backtracks local
 * structures on the fly. NOTE(review): this function continues past the end
 * of this chunk; only its visible part is annotated here. */
PRIVATE int fill_arrays(const char **strings, int maxdist, char *structure) {
  /* fill "c", "fML" and "f3" arrays and return  optimal energy */

  int   i, j, k, length, energy;
  int   decomp, new_fML,MLenergy ;
  int   *type, type_2, tt, s, n_seq, no_close, lastf, lastf2, thisj, lastj, lastj2;
  lastf = lastf2 = INF;

  /* int   bonus=0;*/

  length = (int) strlen(strings[0]);
  for (s=0; strings[s]!=NULL; s++);
  n_seq = s;
  type = (int *) space(n_seq*sizeof(int));
  for (j=0; j<maxdist+5; j++)
    Fmi[j]=DMLi[j]=DMLi1[j]=DMLi2[j]=INF;
  for (j=length; j>length-maxdist-3; j--) {
    for (i=(length-maxdist-2>0)?length-maxdist-2:1 ; i<j; i++)
      c[i][j-i] = fML[i][j-i] = INF;
  }

  for (i = length-TURN-1; i >= 1; i--) { /* i,j in [1..length] */
    for (j = i+1; j<=length && j<=i+TURN; j++) {
      c[i][j-i]=fML[i][j-i]=INF;
    }
    for (j = i+TURN+1; j <= length && j <= i+maxdist; j++) {
      int p, q, psc;
      /* bonus = 0;*/
      for (s=0; s<n_seq; s++) {
        type[s] = pair[S[s][i]][S[s][j]];
        if (type[s]==0) type[s]=7;
      }
      psc = pscore[i][j-i];
      if (psc>=cv_fact*MINPSCORE) {   /* we have a pair 2 consider */
        int new_c=0, stackEnergy=INF;
        /* hairpin ----------------------------------------------*/
        for (new_c=s=0; s<n_seq; s++){
          if((a2s[s][j-1] - a2s[s][i]) < 3) new_c += 600;
          else new_c += E_Hairpin(a2s[s][j-1]-a2s[s][i],type[s],S3[s][i],S5[s][j],Ss[s]+(a2s[s][i-1]), P);
        }
        /*--------------------------------------------------------
          check for elementary structures involving more than one
          closing pair.
          --------------------------------------------------------*/
        for (p = i+1; p <= MIN2(j-2-TURN,i+MAXLOOP+1) ; p++) {
          int minq = j-i+p-MAXLOOP-2;
          if (minq<p+1+TURN) minq = p+1+TURN;
          for (q = minq; q < j; q++) {
            if (pscore[p][q-p]<MINPSCORE) continue;
            for (energy = s=0; s<n_seq; s++) {
              type_2 = pair[S[s][q]][S[s][p]]; /* q,p not p,q! */
              if (type_2 == 0) type_2 = 7;
              energy += E_IntLoop(a2s[s][p-1]-a2s[s][i], a2s[s][j-1]-a2s[s][q], type[s], type_2,
                                  S3[s][i], S5[s][j], S5[s][p], S3[s][q], P);
            }
            new_c = MIN2(energy+c[p][q-p], new_c);
            if ((p==i+1)&&(j==q+1)) stackEnergy = energy; /* remember stack energy */
          } /* end q-loop */
        } /* end p-loop */

        /* multi-loop decomposition ------------------------*/
        decomp = DMLi1[j-1-(i+1)];
        if (dangles) {
          for (s=0; s<n_seq; s++) {
            tt = rtype[type[s]];
            decomp += E_MLstem(tt, S5[s][j], S3[s][i], P);
          }
        }
        else{
          for(s=0; s<n_seq; s++){
            tt = rtype[type[s]];
            decomp += E_MLstem(tt, -1, -1, P);
          }
        }
        MLenergy = decomp + n_seq*P->MLclosing;
        new_c = MIN2(new_c, MLenergy);
        new_c = MIN2(new_c, cc1[j-1-(i+1)]+stackEnergy);
        cc[j-i] = new_c - psc; /* add covariance bonnus/penalty */
        if (noLonelyPairs)
          c[i][j-i] = cc1[j-1-(i+1)]+stackEnergy-psc;
        else
          c[i][j-i] = cc[j-i];
      } /* end >> if (pair) << */
      else c[i][j-i] = INF;
      /* done with c[i,j], now compute fML[i,j] */
      /* free ends ? -----------------------------------------*/
      new_fML = fML[i+1][j-i-1]+n_seq*P->MLbase;
      new_fML = MIN2(fML[i][j-1-i]+n_seq*P->MLbase, new_fML);
      energy = c[i][j-i]/*+P->MLintern[type]*/;
      if(dangles){
        for (s=0; s<n_seq; s++) {
          energy += E_MLstem(type[s], (i > 1) ? S5[s][i] : -1, (j < length) ? S3[s][j] : -1, P);
        }
      }
      else{
        for (s=0; s<n_seq; s++) {
          energy += E_MLstem(type[s], -1, -1, P);
        }
      }
      new_fML = MIN2(energy, new_fML);

      /* modular decomposition -------------------------------*/
      for (decomp = INF, k = i+1+TURN; k <= j-2-TURN; k++)
        decomp = MIN2(decomp, Fmi[k-i]+fML[k+1][j-k-1]);
      DMLi[j-i] = decomp;  /* store for use in ML decompositon */
      new_fML = MIN2(new_fML,decomp);
      fML[i][j-i] = Fmi[j-i] = new_fML; /* substring energy */
    } /* for (j...) */

    /* calculate energies of 5' and 3' fragments */
    {
      /* static state persists across iterations of the outer i-loop to
         coordinate incremental backtracking of local structures */
      static int do_backtrack = 0, prev_i=0, prev_j=0;
      static char * prev=NULL;
      char *ss;
      int tempf3=length;
      int k;
      int thisf=0;
      f3[i] = f3[i+1];
      for (j=i+TURN+1; j<length && j<=i+maxdist; j++) {
        if(c[i][j-i]<INF) {
          /* if (c[j+1]<INF) {*/
          energy = c[i][j-i];
          if(dangles){
            for(s = 0; s < n_seq; s++){
              tt = pair[S[s][i]][S[s][j]];
              if(tt==0) tt=7;
              energy += E_ExtLoop(tt, (i>1) ? S5[s][i] : -1, S3[s][j], P);
            }
          }
          else{
            for(s = 0; s < n_seq; s++){
              tt = pair[S[s][i]][S[s][j]];
              if(tt==0) tt=7;
              energy += E_ExtLoop(tt, -1, -1, P);
            }
          }
          /* track the best per-length energy density and its 3' end */
          if (energy/(j-i+1) < thisf){
            thisf = energy/(j-i+1);
            thisj = j;
          }
          energy += f3[j+1];
          if(f3[i] > energy){
            f3[i] = energy;
            tempf3 = j+1;
          }
        }
      }
      /* special case: pair (i, length) when the window reaches the 3' end */
      if(length <= i+maxdist){
        j = length;
        if(c[i][j-i]<INF) {
          energy = c[i][j-i];
          if(dangles){
            for (s=0; s<n_seq; s++) {
              tt = pair[S[s][i]][S[s][j]];
              if(tt==0) tt=7;
              energy += E_ExtLoop(tt, (i>1) ? S5[s][i] : -1, -1, P);
            }
          }
          else{
            for (s=0; s<n_seq; s++) {
              tt = pair[S[s][i]][S[s][j]];
              if(tt==0) tt=7;
              energy += E_ExtLoop(tt, -1, -1, P);
            }
          }
          /* thisf=MIN2(energy/(j-i+1),thisf); ???*/
          if (energy/(j-i+1) < thisf){
            thisf = energy/(j-i+1);
            thisj = j;
          }
          f3[i] = MIN2(f3[i], energy);
        }
      }
      /* backtrack partial structure */
      /* if (i+maxdist<length) {*/
      if (i<length-1){
        if (f3[i] != f3[i+1]) {
          do_backtrack = 1;
          backtrack_type = 'F';
          if (prev_i==0) {
            prev = backtrack(strings, i , MIN2(maxdist,length-i));
            prev_i = i;
            do_backtrack = 0;
            prev_j = thisj;
            lastf2 = lastf;
            lastj2 = lastj;
            energyprev = f3[i];
          }
        }
        else if((thisf < lastf) && (thisf < lastf2) && ((thisf/(n_seq*100)) < -0.01)){ /*?????????*/
          do_backtrack = 2;
          backtrack_type = 'C';
        }
        else if (do_backtrack){
          if(do_backtrack == 1){
            ss = backtrack(strings, i+1 , MIN2(maxdist,length-i)/*+1*/);
            energyout = f3[i] - f3[i+strlen(ss)-1];/*??*/
          }
          else {
            ss = backtrack(strings, i+1 , lastj-i-2);
            energyout=c[i+1][lastj-(i+1)];
            if(dangles){
              for (s=0; s<n_seq; s++) {
                int type;
                type = pair[S[s][i+1]][S[s][lastj-i]];
                if (type==0) type=7;
                energyout += E_ExtLoop(type, (i>1) ?
S5[s][i+1] : -1, S3[s][lastj-i], P); } } else{ for (s=0; s<n_seq; s++) { int type; type = pair[S[s][i+1]][S[s][lastj-i]]; if (type==0) type=7; energyout += E_ExtLoop(type, -1, -1, P); } } } if((prev_i + strlen(prev) > i+1+strlen(ss)) || (do_backtrack==2)){ char *outstr = (char *)space(sizeof(char) * (strlen(prev)+1)); strncpy(outstr, strings[0]+prev_i-1, strlen(prev)); outstr[strlen(prev)] = '\0'; if (csv==1) printf("%s , %6.2f, %4d, %4d\n",prev, energyprev/(100.*n_seq), prev_i,prev_i + (int)strlen(prev)-1); /* if(do_backtrack==1)*/ else { printf("%s (%6.2f) %4d - %4d\n",prev, energyprev/(100.*n_seq), prev_i,prev_i + (int)strlen(prev)-1); } free(outstr); } free(prev); prev = ss; energyprev = energyout; prev_i = i+1; prev_j = lastj; do_backtrack = 0; backtrack_type='F'; } } lastf2 = lastf; lastf = thisf; lastj2 = lastj; lastj = thisj; if (i==1) { char *outstr = NULL; if (prev) { outstr = (char *)space(sizeof(char) *(strlen(prev) + 1)); strncpy(outstr, strings[0]+prev_i-1, strlen(prev)); outstr[strlen(prev)] = '\0'; if(csv==1) printf("%s ,%6.2f, %4d, %4d\n", prev, (energyprev)/(100.*n_seq), prev_i,prev_i + (int)strlen(prev)-1); else{ printf("%s (%6.2f) %4d - %4d\n", prev, (energyprev)/(100.*n_seq), prev_i,prev_i + (int)strlen(prev)-1); } } if ((f3[prev_i] != f3[1]) || !prev){ ss = backtrack(strings, i , maxdist); if(outstr) free(outstr); outstr = (char *)space(sizeof(char) * (strlen(ss) + 1)); strncpy(outstr, strings[0], strlen(ss)); outstr[strlen(ss)] = '\0'; printf("%s \n", outstr); if(csv==1) printf("%s ,%6.2f ,%4d ,%4d\n", ss, (f3[1]-f3[1 + strlen(ss)-1])/(100.*n_seq), 1, (int)strlen(ss)-1); else{ printf("%s (%6.2f) %4d - %4d\n", ss, (f3[1]-f3[1 + strlen(ss)-1])/(100.*n_seq), 1, (int)strlen(ss)-1); } free(ss); } if(prev) free(prev); if(outstr) free(outstr); } } { int ii, *FF; /* rotate the auxilliary arrays */ FF = DMLi2; DMLi2 = DMLi1; DMLi1 = DMLi; DMLi = FF; FF = cc1; cc1=cc; cc=FF; for (j=0; j< maxdist+5; j++) {cc[j]=Fmi[j]=DMLi[j]=INF; } if 
(i<=length-maxdist-4) { c[i-1] = c[i+maxdist+4]; c[i+maxdist+4] = NULL; fML[i-1] = fML[i+maxdist+4]; fML[i+maxdist+4]=NULL; pscore[i-1] = pscore[i+maxdist+4]; pscore[i+maxdist+4] = NULL; if(i > 1) make_pscores((const char**) strings, structure, maxdist, i-1); for(ii=0; ii<maxdist+5; ii++) { c[i-1][ii] = fML[i-1][ii] = INF; } } } } return f3[1]; } PRIVATE char * backtrack(const char **strings, int start, int maxdist) { /*------------------------------------------------------------------ trace back through the "c", "f3" and "fML" arrays to get the base pairing list. No search for equivalent structures is done. This is fast, since only few structure elements are recalculated. ------------------------------------------------------------------*/ sect sector[MAXSECTORS]; /* backtracking sectors */ int i, j, k, energy; int *type, type_2, tt, n_seq; /*int bonus;*/ int s=0, ss; char *structure; for (s=0; strings[s]!=NULL; s++); n_seq = s; type = (int *) space(n_seq*sizeof(int)); s=0; length = strlen(strings[0]); sector[++s].i = start; sector[s].j = MIN2(length, start+maxdist+1); sector[s].ml = (backtrack_type=='M') ? 1 : ((backtrack_type=='C')?2:0); structure = (char *) space((MIN2(length-start, maxdist)+3)*sizeof(char)); for (i=0; i<=MIN2(length-start, maxdist); i++) structure[i] = '.'; while (s>0) { int ml, fij, cij, traced, i1, j1, d3, d5, mm, p, q, jj=0; int canonical = 1; /* (i,j) closes a canonical structure */ i = sector[s].i; j = sector[s].j; ml = sector[s--].ml; /* ml is a flag indicating if backtracking is to occur in the fML- (1) or in the f-array (0) */ if (ml==2) { structure[i-start] = '('; structure[j-start] = ')'; goto repeat1; } if (j < i+TURN+1) continue; /* no more pairs in this interval */ fij = (ml)? fML[i][j-i] : f3[i]; if (ml == 0) { /* backtrack in f3 */ if (fij == f3[i+1]) { sector[++s].i = i+1; sector[s].j = j; sector[s].ml = ml; continue; } /* i is paired. 
Find pairing partner */ for (k=i+TURN+1,traced=0; k<=j; k++) { int cc; jj = k+1; cc = c[i][k-(i)]; if (cc<INF) { if(dangles){ for (ss=0; ss<n_seq; ss++) { type[ss] = pair[S[ss][i]][S[ss][k]]; if (type[ss]==0) type[ss]=7; cc += E_ExtLoop(type[ss], (i>1) ? S5[ss][i] : -1, (k<length) ? S3[ss][k] : -1, P); } } else{ for (ss=0; ss<n_seq; ss++) { type[ss] = pair[S[ss][i]][S[ss][k]]; if (type[ss]==0) type[ss]=7; cc += E_ExtLoop(type[ss], -1, -1, P); } } if (fij == cc + f3[k+1]) traced=i; } if (traced) break; } if (!traced) nrerror("backtrack failed in f3"); if (j==length) { /* backtrack only one component, unless j==length */ sector[++s].i = jj; sector[s].j = j; sector[s].ml = ml; } i=traced; j=k; structure[i-start] = '('; structure[j-start] = ')'; goto repeat1; } else { /* trace back in fML array */ if (fML[i][j-1-i]+n_seq*P->MLbase == fij) { /* 3' end is unpaired */ sector[++s].i = i; sector[s].j = j-1; sector[s].ml = ml; continue; } if (fML[i+1][j-(i+1)]+n_seq*P->MLbase == fij) { /* 5' end is unpaired */ sector[++s].i = i+1; sector[s].j = j; sector[s].ml = ml; continue; } cij = c[i][j-i] ; if(dangles){ for (ss=0; ss<n_seq; ss++) { tt = pair[S[ss][i]][S[ss][j]]; if (tt==0) tt=7; cij += E_MLstem(tt, (i>1) ? S5[ss][i] : -1, (j<length) ? 
S3[ss][j] : -1, P); } } else{ for (ss=0; ss<n_seq; ss++) { tt = pair[S[ss][i]][S[ss][j]]; if (tt==0) tt=7; cij += E_MLstem(tt, -1, -1, P); } } if(fij==cij){ /* found a pair */ structure[i-start] = '('; structure[j-start] = ')'; goto repeat1; } for (k = i+1+TURN; k <= j-2-TURN; k++) if (fij == (fML[i][k-i]+fML[k+1][j-(k+1)])) break; sector[++s].i = i; sector[s].j = k; sector[s].ml = ml; sector[++s].i = k+1; sector[s].j = j; sector[s].ml = ml; if (k>j-2-TURN) nrerror("backtrack failed in fML"); continue; } repeat1: /*----- begin of "repeat:" -----*/ if (canonical) cij = c[i][j-i]; for (ss=0; ss<n_seq; ss++) { type[ss] = pair[S[ss][i]][S[ss][j]]; if (type[ss]==0) type[ss] = 7; } /* bonus = 0;*/ if (noLonelyPairs) if (cij == c[i][j-i]) { /* (i.j) closes canonical structures, thus (i+1.j-1) must be a pair */ for (ss=0; ss<n_seq; ss++) { type_2 = pair[S[ss][j-1]][S[ss][i+1]]; /* j,i not i,j */ if (type_2==0) type_2 = 7; cij -= P->stack[type[ss]][type_2]; } cij += pscore[i][j-i]; structure[i+1-start] = '('; structure[j-1-start] = ')'; i++; j--; canonical=0; goto repeat1; } canonical = 1; cij += pscore[i][j-i]; { int cc=0; for (ss=0; ss<n_seq; ss++){ if((a2s[ss][j-1] - a2s[ss][i]) < 3) cc += 600; else cc += E_Hairpin(a2s[ss][j-1] - a2s[ss][i], type[ss], S3[ss][i], S5[ss][j], Ss[ss] + a2s[ss][i-1], P); } if (cij == cc) /* found hairpin */ continue; } for (p = i+1; p <= MIN2(j-2-TURN,i+MAXLOOP+1); p++) { int minq; minq = j-i+p-MAXLOOP-2; if (minq<p+1+TURN) minq = p+1+TURN; for (q = j-1; q >= minq; q--) { if (c[p][q-p]>=INF) continue; for (ss=energy=0; ss<n_seq; ss++) { type_2 = pair[S[ss][q]][S[ss][p]]; /* q,p not p,q */ if (type_2==0) type_2 = 7; energy += E_IntLoop(a2s[ss][p-1] - a2s[ss][i], a2s[ss][j-1] - a2s[ss][q], type[ss], type_2, S3[ss][i], S5[ss][j], S5[ss][p], S3[ss][q], P); } traced = (cij == energy+c[p][q-p]); if (traced) { structure[p-start] = '('; structure[q-start] = ')'; i = p, j = q; goto repeat1; } } } /* end of repeat: 
--------------------------------------------------*/ /* (i.j) must close a multi-loop */ mm = n_seq*P->MLclosing; if(dangles){ for (ss=0; ss<n_seq; ss++) { tt = rtype[type[ss]]; mm += E_MLstem(tt, S5[ss][j],S3[ss][i], P); } } else{ for (ss=0; ss<n_seq; ss++) { tt = rtype[type[ss]]; mm += E_MLstem(tt, -1, -1, P); } } i1 = i+1; j1 = j-1; sector[s+1].ml = sector[s+2].ml = 1; for (k = i+TURN+2; k < j-TURN-2; k++){ if(cij == fML[i+1][k-(i+1)] + fML[k+1][j-1-(k+1)] + mm) break; } if (k<=j-3-TURN){ /* found the decomposition */ sector[++s].i = i1; sector[s].j = k; sector[++s].i = k+1; sector[s].j = j1; } else { nrerror("backtracking failed in repeat"); } } if (start+maxdist<length) { for (i=strlen(structure); i>0 && structure[i-1] == '.'; i--) structure[i] = '\0'; } return structure; } /*---------------------------------------------------------------------------*/ PRIVATE short *encode_seq(const char *sequence, short *s5, short *s3, char *ss, unsigned short *as){ unsigned int i,l; short *S; unsigned short p; l = strlen(sequence); S = (short *) space(sizeof(short)*(l+2)); S[0] = (short) l; s5[0]=s5[1]=0; /* make numerical encoding of sequence */ for (i=1; i<=l; i++) { short ctemp = (short)encode_char(toupper(sequence[i-1])); S[i] = ctemp ; } if (oldAliEn) { /*use alignment sequences in all energy evaluations*/ ss[0]=sequence[0]; for (i=1; i<l; i++) { s5[i]=S[i-1]; s3[i]=S[i+1]; ss[i]= sequence[i]; as[i]=i; } ss[l] = sequence[l]; as[l]=l; s5[l]=S[l-1]; s3[l]=0; S[l+1] = S[1]; s5[1]=0; if (1) { s5[1]=S[l]; s3[l]=S[1]; ss[l+1]=S[1]; } return S; } else { if (1) { for (i=l; i>0; i--) { char c5; c5=sequence[i-1]; if ((c5=='-')||(c5=='_')||(c5=='~')||(c5=='.')) continue; s5[1] = S[i]; break; } for (i=1; i<=l; i++) { char c3; c3 = sequence[i-1]; if ((c3=='-')||(c3=='_')||(c3=='~')||(c3=='.')) continue; s3[l] = S[i]; break; } } else s5[1]=s3[l]=0; for (i=1,p=0; i<=l; i++) { char c5; c5=sequence[i-1]; if ((c5=='-')||(c5=='_')||(c5=='~')||(c5=='.')) s5[i+1]=s5[i]; else { /* no gap */ 
ss[p++]=sequence[i-1]; /*start at 0!!*/ s5[i+1]=S[i]; } as[i]=p; } for (i=l; i>=1; i--) { char c3; c3=sequence[i-1]; if ((c3=='-')||(c3=='_')||(c3=='~')||(c3=='.')) s3[i-1]=s3[i]; else s3[i-1]=S[i]; } } return S; } PRIVATE double cov_score(const char **AS, int i, int j) { int n_seq,k,l,s; double score; int pfreq[8]={0,0,0,0,0,0,0,0}; for (n_seq=0; AS[n_seq]!=NULL; n_seq++); for (s=0; s<n_seq; s++) { int type; if (S[s][i]==0 && S[s][j]==0) type = 7; /* gap-gap */ else { if ((AS[s][i] == '~')||(AS[s][j] == '~')) type = 7; else type = pair[S[s][i]][S[s][j]]; } pfreq[type]++; } if (pfreq[0]*2+pfreq[7]>n_seq) return NONE; else for (k=1,score=0.; k<=6; k++) /* ignore pairtype 7 (gap-gap) */ for (l=k; l<=6; l++) /* scores for replacements between pairtypes */ /* consistent or compensatory mutations score 1 or 2 */ score += pfreq[k]*pfreq[l]*dm[k][l]; /* counter examples score -1, gap-gap scores -0.25 */ return cv_fact * ((UNIT*score)/n_seq - nc_fact*UNIT*(pfreq[0] + pfreq[7]*0.25)); } PRIVATE void make_pscores(const char ** AS, const char *structure, int maxd, int i) { /* calculate co-variance bonus for each pair depending on */ /* compensatory/consistent mutations and incompatible seqs */ /* should be 0 for conserved pairs, >0 for good pairs */ int n,j,k,l,s; n=S[0][0]; /* length of seqs */ /*first allocate space:*/ pscore[i]=(int *)space((maxd+5)*sizeof(int)); /* pscore[start]-=start;*/ /*fill pscore[start], too close*/ for (j=i+1; (j<i+TURN+1) && (j<=n); j++) { pscore[i][j-i] = NONE; } for (j=i+TURN+1; ((j<=n) && (j<=i+maxd)); j++) { pscore[i][j-i] = cov_score(AS, i, j); } if (noLonelyPairs) { /* remove unwanted lonely pairs */ int type, otype=0, ntype=0; for (j=i+TURN; ((j<n)&&(j<i+maxd)); j++) { if ((i>1) && (j<n)) otype = cov_score(AS, i-1, j+1); type=pscore[i][j-i]; if (i<n) ntype=pscore[i+1][j-1-(i+1)]; else ntype=NONE; if ((otype<-4*UNIT)&&(ntype<-4*UNIT)) /* worse than 2 counterex */ pscore[i][j-i] = NONE; /* i.j can only form isolated pairs */ } } if 
(fold_constrained&&(structure!=NULL)) { int psij, hx, *stack; stack = (int *) space(sizeof(int)*(n+1)); hx=psij=0; /* for(hx=0, j=i+TURN; ((j<=i+maxd)&&(j<=n)); j++) {*/ switch (structure[i-1]) { case 'x': /* can't pair */ for (l=i+TURN+1; l<=i+maxd; l++) pscore[i][l-i] = NONE; break; case '(': hx=1; psij=1; for (l=i+1; l<=i+maxd; l++) { switch (structure[l-1]) { case '(': hx++; pscore[i][l-i] = NONE; break; case ')': hx--; if (hx!=0) pscore[i][l-i] = NONE; break; default: pscore[i][l-i] = NONE; } /* fallthrough */ } case ')': for (l=i+TURN+1; l<=i+maxd; l++) pscore[i][l-i] = NONE; break; case '>': for (l=i+TURN+1; l<=i+maxd; l++) pscore[i][l-i] = NONE; break; } if (!psij) for (l=i+1; l<=i+maxd; l++) { /*no '(' constraint on i*/ switch (structure[l-1]) { case '(': pscore[i][l-i] = NONE; break; case '<': pscore[i][l-i] = NONE; break; case 'x': pscore[i][l-i] = NONE; break; case ')': pscore[i][l-i] = NONE; break; } } if (hx!=0) { fprintf(stderr, "%s\n", structure); nrerror("unbalanced brackets in constraint string"); } free(stack); } }
kmp_set_dispatch_buf.c
// RUN: %libomp-compile // RUN: env KMP_DISP_NUM_BUFFERS=0 %libomp-run // RUN: env KMP_DISP_NUM_BUFFERS=1 %libomp-run // RUN: env KMP_DISP_NUM_BUFFERS=3 %libomp-run // RUN: env KMP_DISP_NUM_BUFFERS=4 %libomp-run // RUN: env KMP_DISP_NUM_BUFFERS=7 %libomp-run // RUN: %libomp-compile -DMY_SCHEDULE=guided // RUN: env KMP_DISP_NUM_BUFFERS=1 %libomp-run // RUN: env KMP_DISP_NUM_BUFFERS=3 %libomp-run // RUN: env KMP_DISP_NUM_BUFFERS=4 %libomp-run // RUN: env KMP_DISP_NUM_BUFFERS=7 %libomp-run // UNSUPPORTED: clang-11 #include <stdio.h> #include <omp.h> #include <stdlib.h> #include <limits.h> #include "omp_testsuite.h" #define INCR 7 #define MY_MAX 200 #define MY_MIN -200 #define NUM_LOOPS 100 #ifndef MY_SCHEDULE # define MY_SCHEDULE dynamic #endif int a, b, a_known_value, b_known_value; int test_kmp_set_disp_num_buffers() { int success = 1; a = 0; b = 0; // run many small dynamic loops to stress the dispatch buffer system #pragma omp parallel { int i,j; for (j = 0; j < NUM_LOOPS; j++) { #pragma omp for schedule(MY_SCHEDULE) nowait for (i = MY_MIN; i < MY_MAX; i+=INCR) { #pragma omp atomic a++; } #pragma omp for schedule(MY_SCHEDULE) nowait for (i = MY_MAX; i >= MY_MIN; i-=INCR) { #pragma omp atomic b++; } } } // detect failure if (a != a_known_value || b != b_known_value) { success = 0; printf("a = %d (should be %d), b = %d (should be %d)\n", a, a_known_value, b, b_known_value); } return success; } int main(int argc, char** argv) { int i,j; int num_failed=0; // figure out the known values to compare with calculated result a_known_value = 0; b_known_value = 0; for (j = 0; j < NUM_LOOPS; j++) { for (i = MY_MIN; i < MY_MAX; i+=INCR) a_known_value++; for (i = MY_MAX; i >= MY_MIN; i-=INCR) b_known_value++; } for(i = 0; i < REPETITIONS; i++) { if(!test_kmp_set_disp_num_buffers()) { num_failed++; } } return num_failed; }
bml_transpose_dense_typed.c
#ifdef BML_USE_MAGMA
#include "magma_v2.h"
#endif

#include "../../macros.h"
#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_parallel.h"
#include "../bml_transpose.h"
#include "../bml_types.h"
#include "bml_allocate_dense.h"
#include "bml_transpose_dense.h"
#include "bml_types_dense.h"

#include <complex.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

#ifdef _OPENMP
#include <omp.h>
#endif

/** Transpose a matrix.
 *
 * Allocates a new zeroed dense matrix B of the same dimension and
 * distribution mode as A and fills it with A's transpose.  With MAGMA
 * enabled the whole transpose is done on the device; otherwise the OpenMP
 * path only writes the rows in [localRowMin, localRowMax) owned by this
 * rank (rows outside that window stay zero from the allocation).
 *
 * \ingroup transpose_group
 *
 * \param A The matrix to be transposed
 * \return The transposed A
 */
bml_matrix_dense_t *TYPED_FUNC(
    bml_transpose_new_dense) (
    bml_matrix_dense_t * A)
{
    int N = A->N;
    bml_matrix_dimension_t matrix_dimension = { A->N, A->N, A->N };
    bml_matrix_dense_t *B =
        TYPED_FUNC(bml_zero_matrix_dense) (matrix_dimension,
                                           A->distribution_mode);
    REAL_T *A_matrix = A->matrix;
    REAL_T *B_matrix = B->matrix;

    int *A_localRowMin = A->domain->localRowMin;
    int *A_localRowMax = A->domain->localRowMax;
    int myRank = bml_getMyRank();

#ifdef BML_USE_MAGMA
    /* Device path: make sure pending work on A's queue is done before the
     * out-of-place transpose, and wait for B's queue afterwards. */
    magma_queue_sync(A->queue);
    MAGMABLAS(transpose) (A->N, A->N, A->matrix, A->ld, B->matrix, B->ld,
                          B->queue);
    magma_queue_sync(B->queue);
#else
#pragma omp parallel for \
    shared(N, A_matrix, B_matrix) \
    shared(A_localRowMin, A_localRowMax, myRank)
    //for (int i = 0; i < N; i++)
    for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
    {
        for (int j = 0; j < N; j++)
        {
            /* B(i,j) = A(j,i) for each locally owned row i. */
            B_matrix[ROWMAJOR(i, j, N, N)] = A_matrix[ROWMAJOR(j, i, N, N)];
        }
    }
#endif
    return B;
}

/** Transpose a matrix in place.
 *
 * Swaps the strict upper and lower triangles of A.  The OpenMP path
 * processes the full matrix (it does not restrict to locally owned rows,
 * unlike bml_transpose_new_dense above).
 *
 * \ingroup transpose_group
 *
 * \param A The matrix to be transposed
 * \return The transposed A
 */
void TYPED_FUNC(
    bml_transpose_dense) (
    bml_matrix_dense_t * A)
{
    int N = A->N;

#ifdef BML_USE_MAGMA
    magma_queue_sync(A->queue);
    MAGMABLAS(transpose_inplace) (A->N, A->matrix, A->ld, A->queue);
#else
    REAL_T *A_matrix = A->matrix;
    REAL_T tmp;

    /* Each parallel iteration i touches only entries (i,j)/(j,i) with
     * j > i, so no two iterations write the same element. */
#pragma omp parallel for \
    private(tmp) \
    shared(N, A_matrix)
    for (int i = 0; i < N - 1; i++)
    {
        for (int j = i + 1; j < N; j++)
        {
            /* NOTE(review): j starts at i+1, so i != j always holds here;
             * the guard is redundant but harmless. */
            if (i != j)
            {
                tmp = A_matrix[ROWMAJOR(i, j, N, N)];
                A_matrix[ROWMAJOR(i, j, N, N)] =
                    A_matrix[ROWMAJOR(j, i, N, N)];
                A_matrix[ROWMAJOR(j, i, N, N)] = tmp;
            }
        }
    }
#endif
}
mulpara.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

#define NUM_THREADS 10

/*
 * Builds an n x n matrix with mat[i][j] = j, squares it with an
 * OpenMP-parallel triple loop, prints the elapsed wall-clock time and
 * writes the product matrix to "mulpara.txt".
 *
 * Fixes over the previous version:
 *  - `#pragma omp for` was applied to a compound statement `{ ... }`,
 *    which is not a valid worksharing-loop construct (the directive must
 *    bind directly to a for loop); it is now a combined
 *    `#pragma omp parallel for` on the outer loop.
 *  - The two 1000x1000 float matrices (~4 MB each) were stack VLAs and
 *    could overflow a default-sized stack; they are now heap-allocated.
 *  - fopen/malloc results are checked and resources are released.
 */
int main(int argc, char const *argv[])
{
    const int n = 1000;

    FILE *MatrixFile = fopen("mulpara.txt", "w");
    if (MatrixFile == NULL) {
        perror("mulpara.txt");
        return EXIT_FAILURE;
    }

    /* Pointers-to-row keep the convenient mat[i][j] indexing without the
     * stack-overflow risk of declaring the arrays as VLAs. */
    float (*mat)[n] = malloc(sizeof *mat * n);
    float (*c)[n] = malloc(sizeof *c * n);
    if (mat == NULL || c == NULL) {
        fprintf(stderr, "out of memory\n");
        free(mat);
        free(c);
        fclose(MatrixFile);
        return EXIT_FAILURE;
    }

    double t1 = omp_get_wtime();

    /* Fill the input matrix: every row is 0, 1, ..., n-1 (timed, as in
     * the original program). */
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            mat[i][j] = j;
        }
    }

    /* c = mat * mat; rows of the result are distributed across threads,
     * so no two threads ever write the same c[i][j]. */
    #pragma omp parallel for
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            c[i][j] = 0;
            for (int k = 0; k < n; k++) {
                c[i][j] = c[i][j] + mat[i][k] * mat[k][j];
            }
        }
    }

    double t2 = omp_get_wtime();
    double tiempo = t2 - t1;
    printf("Tomo: %lf segundos\n", tiempo);

    /* Dump the product matrix, tab-separated, same format as before. */
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            fprintf(MatrixFile, "%f\t", c[i][j]);
        }
    }

    free(mat);
    free(c);
    fclose(MatrixFile);
    return 0;
}
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 32; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,8);t1++) { lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16)); ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(8*t1+Ny+13,32)),floord(16*t2+Ny+12,32)),floord(16*t1-16*t2+Nz+Ny+11,32));t3++) { for (t4=max(max(max(0,ceild(t1-31,32)),ceild(16*t2-Nz-252,256)),ceild(32*t3-Ny-252,256));t4<=min(min(min(min(floord(Nt+Nx-4,256),floord(8*t1+Nx+13,256)),floord(16*t2+Nx+12,256)),floord(32*t3+Nx+28,256)),floord(16*t1-16*t2+Nz+Nx+11,256));t4++) { for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),32*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),32*t3+30),256*t4+254),16*t1-16*t2+Nz+13);t5++) { for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) { lbv=max(256*t4,t5+1); ubv=min(256*t4+255,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) 
+ 1]));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
GB_unaryop__abs_uint64_int8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__abs_uint64_int8
// op(A') function: GB_tran__abs_uint64_int8

// C type: uint64_t
// A type: int8_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
// (ABS on an unsigned result is the identity, so the op is a plain copy;
// the interesting work is the int8 -> uint64 cast below.)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (cast (aij))
// Note the order: the cast runs BEFORE the (identity) op, so a negative
// int8 input is converted modulo 2^64 by the usual C conversion rules.
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_uint64_int8
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    int8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // element-wise apply; iterations are independent, aliasing of Cx/Ax is
    // safe because each iteration reads and writes only index p
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_uint64_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared across all operators and is
    // textually included here with this file's GB_* macros in effect
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
trilinos_block_builder_and_solver.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // #if !defined(KRATOS_TRILINOS_BLOCK_BUILDER_AND_SOLVER ) #define KRATOS_TRILINOS_BLOCK_BUILDER_AND_SOLVER /* System includes */ #include <set> /* External includes */ #include "boost/timer.hpp" /* Project includes */ #include "includes/define.h" #include "solving_strategies/builder_and_solvers/builder_and_solver.h" #include "Epetra_MpiComm.h" //trilinos includes #include "Epetra_Map.h" #include "Epetra_Vector.h" #include "Epetra_FECrsGraph.h" #include "Epetra_FECrsMatrix.h" #include "Epetra_IntSerialDenseVector.h" #include "Epetra_IntVector.h" #include "Epetra_SerialDenseMatrix.h" #include "Epetra_SerialDenseVector.h" #include "EpetraExt_RowMatrixOut.h" #include "EpetraExt_MultiVectorOut.h" #include "Epetra_Import.h" namespace Kratos { /**@name Kratos Globals */ /*@{ */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ /**@name Enum's */ /*@{ */ /*@} */ /**@name Functions */ /*@{ */ /*@} */ /**@name Kratos Classes */ /*@{ */ /** Short class definition. Detail class definition. Current class provides an implementation for standard builder and solving operations. the RHS is constituted by the unbalanced loads (residual) Degrees of freedom are reordered putting the restrained degrees of freedom at the end of the system ordered in reverse order with respect to the DofSet. Imposition of the dirichlet conditions is naturally dealt with as the residual already contains this information. 
Calculation of the reactions involves a cost very similar to the
calculation of the total residual

\URL[Example of use html]{ extended_documentation/no_ex_of_use.html}
\URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf}
\URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc}
\URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps}

\URL[Extended documentation html]{ extended_documentation/no_ext_doc.html}
\URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf}
\URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc}
\URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps}

*/
template<class TSparseSpace,
         class TDenseSpace,
         class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
         >
class TrilinosBlockBuilderAndSolver
    : public BuilderAndSolver< TSparseSpace,TDenseSpace, TLinearSolver >
{
public:
    /**@name Type Definitions */
    /*@{ */
    KRATOS_CLASS_POINTER_DEFINITION( TrilinosBlockBuilderAndSolver );

    typedef BuilderAndSolver<TSparseSpace,TDenseSpace, TLinearSolver > BaseType;

    typedef TSparseSpace SparseSpaceType;

    typedef typename BaseType::TSchemeType TSchemeType;

    typedef typename BaseType::TDataType TDataType;

    typedef typename BaseType::DofsArrayType DofsArrayType;

    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;

    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;

    typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;

    typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;

    typedef typename BaseType::NodesArrayType NodesArrayType;

    typedef typename BaseType::ElementsArrayType ElementsArrayType;

    typedef typename BaseType::ConditionsArrayType ConditionsArrayType;

    typedef typename BaseType::ElementsContainerType ElementsContainerType;

    /*@} */
    /**@name Life Cycle
    */
    /*@{ */

    /**
    Constructor.
    @param Comm                   Epetra MPI communicator used for all parallel operations.
    @param guess_row_size         estimated non-zeros per matrix row, used to preallocate the Epetra graph.
    @param pNewLinearSystemSolver linear solver applied in SystemSolveWithPhysics.
    */
    TrilinosBlockBuilderAndSolver(
        Epetra_MpiComm& Comm,
        int guess_row_size,
        typename TLinearSolver::Pointer pNewLinearSystemSolver)
        : BuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver >(pNewLinearSystemSolver)
        , mrComm(Comm),mguess_row_size(guess_row_size)
    {
        /* std::cout << "using the standard builder and solver " << std::endl; */
    }

    //          TrilinosBlockBuilderAndSolver(
    //          )
    //              : BaseType(typename LinearSolver<TSparseSpace,TDenseSpace>::Pointer(new LinearSolver<TSparseSpace,TDenseSpace>))
    //          {
    //
    //              /* std::cout << "using the standard builder and solver " << std::endl; */
    //
    //          }

    /** Destructor. */
    virtual ~TrilinosBlockBuilderAndSolver() {}

    /*@} */
    /**@name Operators
    */
    /*@{ */

    //**************************************************************************
    //**************************************************************************
    /**
    Assembles the global LHS matrix A and RHS vector b by looping over all
    elements and conditions of r_model_part and accumulating their local
    contributions, then calls Epetra GlobalAssemble() on both to exchange
    off-processor entries.  Also zeroes the stored reactions vector.
    */
    void Build(
        typename TSchemeType::Pointer pScheme,
        ModelPart& r_model_part,
        TSystemMatrixType& A,
        TSystemVectorType& b) override
    {
        KRATOS_TRY
        if(!pScheme)
            KRATOS_ERROR << "No scheme provided!";

        //getting the elements from the model
        ElementsArrayType& pElements = r_model_part.Elements();

        //getting the array of the conditions
        ConditionsArrayType& ConditionsArray = r_model_part.Conditions();

        //resetting to zero the vector of reactions
        TSparseSpace::SetToZero(*BaseType::mpReactionsVector);

        //contributions to the system
        LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
        LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);

        //vector containing the localization in the system of the different terms
        Element::EquationIdVectorType EquationId;

        // int rank = A.Comm().MyPID(); //getting the processor Id

        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();

        // assemble all elements
        for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it)
        {
            //calculate elemental contribution
            pScheme->CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo);

            //assemble the elemental contribution
            TSparseSpace::AssembleLHS(A,LHS_Contribution,EquationId);
            TSparseSpace::AssembleRHS(b,RHS_Contribution,EquationId);

            // clean local elemental memory
            pScheme->CleanMemory(*it);
        }

        LHS_Contribution.resize(0,0,false);
        RHS_Contribution.resize(0,false);

        // assemble all conditions
        for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it)
        {
            //calculate elemental contribution
            pScheme->Condition_CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo);

            //assemble the elemental contribution
            TSparseSpace::AssembleLHS(A,LHS_Contribution,EquationId);
            TSparseSpace::AssembleRHS(b,RHS_Contribution,EquationId);
        }

        //finalizing the assembly (Epetra collective: exchanges off-processor contributions)
        A.GlobalAssemble();
        b.GlobalAssemble();
        KRATOS_CATCH("")
    }

    //**************************************************************************
    //**************************************************************************
    /**
    Assembles only the global LHS matrix A (same loop structure as Build,
    without RHS contributions).
    */
    void BuildLHS(
        typename TSchemeType::Pointer pScheme,
        ModelPart& r_model_part,
        TSystemMatrixType& A) override
    {
        KRATOS_TRY

        //getting the elements from the model
        ElementsArrayType& pElements = r_model_part.Elements();

        //getting the array of the conditions
        ConditionsArrayType& ConditionsArray = r_model_part.Conditions();

        //resetting to zero the vector of reactions
        // TSparseSpace::SetToZero(BaseType::mReactionsVector);

        //contributions to the system
        LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);

        //vector containing the localization in the system of the different terms
        Element::EquationIdVectorType EquationId;

        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();

        // assemble all elements
        for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it)
        {
            //calculate elemental contribution
            pScheme->Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo);

            //assemble the elemental contribution
            TSparseSpace::AssembleLHS(A,LHS_Contribution,EquationId);

            // clean local elemental memory
            pScheme->CleanMemory(*it);
        }

        LHS_Contribution.resize(0,0,false);

        // assemble all conditions
        for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it)
        {
            //calculate elemental contribution
            pScheme->Condition_Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo);

            //assemble the elemental contribution
            TSparseSpace::AssembleLHS(A,LHS_Contribution,EquationId);
        }

        //finalizing the assembly
        A.GlobalAssemble();
        KRATOS_CATCH("")
    }

    //**************************************************************************
    //**************************************************************************
    /**
    Solve the linear problem.
    Skips the solve entirely (and zeroes Dx) when the RHS norm is zero;
    otherwise forwards additional physical data to the solver when requested
    and calls the wrapped linear solver.
    */
    void SystemSolveWithPhysics(
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b,
        ModelPart& r_model_part
    )
    {
        KRATOS_TRY

        double norm_b;
        if(TSparseSpace::Size(b) != 0)
            norm_b = TSparseSpace::TwoNorm(b);
        else
            norm_b = 0.00;

        if(norm_b != 0.00)
        {
            if (this->GetEchoLevel()>1)
                if (mrComm.MyPID() == 0) KRATOS_WATCH("entering in the solver");

            //provide physical data as needed
            if(BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded() )
                BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, r_model_part);

            if (this->GetEchoLevel()>3)
            {
                //dump system to MatrixMarket files and abort (debug aid)
                EpetraExt::RowMatrixToMatrixMarketFile( "A.mm", A, "matrixA", "lhs_matrix", true);
                EpetraExt::MultiVectorToMatrixMarketFile( "b.mm", b, "vectorb","rhs_vector",true);
                KRATOS_ERROR << "Stopping after printing the matrix";
            }
            // NOTE(review): this second echo-level-3 block is unreachable in
            // practice — the KRATOS_ERROR above already throws.  Looks like a
            // copy-paste duplicate; confirm and remove one of the two.
            if (this->GetEchoLevel()>3)
            {
                EpetraExt::RowMatrixToMatrixMarketFile( "A.mm", A, "matrixA", "block_matrix", true);
                EpetraExt::MultiVectorToMatrixMarketFile( "b.mm", b, "vectorb","rhs_vector",true);
                KRATOS_ERROR << "Stopping after printing the matrix";
            }

            //do solve
            BaseType::mpLinearSystemSolver->Solve(A,Dx,b);
        }
        else
        {
            //zero RHS: the solution increment is identically zero
            TSparseSpace::SetToZero(Dx);
        }

        //prints information about the current time
        if (this->GetEchoLevel()>1)
        {
            std::cout << *(BaseType::mpLinearSystemSolver) << std::endl;
        }

        KRATOS_CATCH("")
    }

    //**************************************************************************
    //**************************************************************************
    /**
    Build and solve the linear problem.
    Sequence: Build -> ApplyDirichletConditions -> SystemSolveWithPhysics,
    with optional timing and debug printing controlled by the echo level.
    */
    void BuildAndSolve(
        typename TSchemeType::Pointer pScheme,
        ModelPart& r_model_part,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b) override
    {
        KRATOS_TRY

        boost::timer building_time;

        Build(pScheme,r_model_part,A,b);

        if(BaseType::GetEchoLevel()>0)
        {
            if(this->mrComm.MyPID() == 0)
                std::cout << "Building Time : " << building_time.elapsed() << std::endl;
        }

        //apply dirichlet conditions
        ApplyDirichletConditions(pScheme,r_model_part,A,Dx,b);

        if (BaseType::GetEchoLevel()== 3)
        {
            std::cout << "before the solution of the system" << std::endl;
            std::cout << "System Matrix = " << A << std::endl;
            std::cout << "unknowns vector = " << Dx << std::endl;
            std::cout << "RHS vector = " << b << std::endl;
        }

        boost::timer solve_time;

        SystemSolveWithPhysics(A,Dx,b,r_model_part);

        if(BaseType::GetEchoLevel()>0)
        {
            if(this->mrComm.MyPID() == 0)
                std::cout << "System Solve Time : " << solve_time.elapsed() << std::endl;
        }

        if (BaseType::GetEchoLevel()== 3)
        {
            std::cout << "after the solution of the system" << std::endl;
            std::cout << "System Matrix = " << A << std::endl;
            std::cout << "unknowns vector = " << Dx << std::endl;
            std::cout << "RHS vector = " << b << std::endl;
        }

        KRATOS_CATCH("")
    }

    //**************************************************************************
    //**************************************************************************
    /**
    Build right-hand side and solve the linear problem.
    */
    void BuildRHSAndSolve(
        typename TSchemeType::Pointer pScheme,
        ModelPart& r_model_part,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b) override
    {
        KRATOS_TRY

        BuildRHS(pScheme,r_model_part,b);
        SystemSolveWithPhysics(A,Dx,b,r_model_part);

        KRATOS_CATCH("")
    }

    //**************************************************************************
    //**************************************************************************
    /**
    Assembles only the global RHS vector b from element and condition
    contributions, finalized with a GlobalAssemble().
    */
    void BuildRHS(
        typename TSchemeType::Pointer pScheme,
        ModelPart& r_model_part,
        TSystemVectorType& b) override
    {
        KRATOS_TRY

        //Getting the Elements
        ElementsArrayType& pElements = r_model_part.Elements();

        //getting the array of the conditions
        ConditionsArrayType& ConditionsArray = r_model_part.Conditions();

        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();

        //resetting to zero the vector of reactions
        // TSparseSpace::SetToZero(BaseType::mReactionsVector);

        //contributions to the system
        LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);

        //vector containing the localization in the system of the different terms
        Element::EquationIdVectorType EquationId;

        // assemble all elements
        for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it)
        {
            //calculate elemental Right Hand Side Contribution
            pScheme->Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo);

            //assemble the elemental contribution
            TSparseSpace::AssembleRHS(b,RHS_Contribution,EquationId);
        }

        RHS_Contribution.resize(0,false);

        // assemble all conditions
        for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it)
        {
            //calculate elemental contribution
            pScheme->Condition_Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo);

            //assemble the elemental contribution
            TSparseSpace::AssembleRHS(b,RHS_Contribution,EquationId);
        }

        //finalizing the assembly
        b.GlobalAssemble();
        KRATOS_CATCH("")
    }
    //**************************************************************************
    //**************************************************************************
    /**
    Collects the dof lists of all local-mesh elements and all conditions into
    BaseType::mDofSet (made unique), and throws if the resulting set is empty.
    In debug builds, when reactions are requested, verifies that every dof has
    a reaction variable.
    */
    void SetUpDofSet(
        typename TSchemeType::Pointer pScheme,
        ModelPart& r_model_part
    ) override
    {
        KRATOS_TRY

        //Gets the array of elements from the modeler
        ElementsArrayType& pElements = r_model_part.GetCommunicator().LocalMesh().Elements();
        /* ElementsArrayType& pElements = r_model_part.Elements(ModelPart::Kratos_Local); */

        Element::DofsVectorType ElementalDofList;

        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();

        DofsArrayType Doftemp;
        BaseType::mDofSet = DofsArrayType();

        // NOTE(review): 'rank' is obtained but never used in this method.
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD,&rank);

        for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it)
        {
            // gets list of Dof involved on every element
            pScheme->GetElementalDofList(*it,ElementalDofList,CurrentProcessInfo);

            for(typename Element::DofsVectorType::iterator i = ElementalDofList.begin() ; i != ElementalDofList.end() ; ++i)
            {
                Doftemp.push_back( i->get() );
            }
        }

        //taking in account conditions
        ConditionsArrayType& pConditions = r_model_part.Conditions();
        for (typename ConditionsArrayType::ptr_iterator it=pConditions.ptr_begin(); it!=pConditions.ptr_end(); ++it)
        {
            // gets list of Dof involved on every condition
            pScheme->GetConditionDofList(*it,ElementalDofList,CurrentProcessInfo);

            for(typename Element::DofsVectorType::iterator i = ElementalDofList.begin() ; i != ElementalDofList.end() ; ++i)
            {
                Doftemp.push_back( i->get() );
            }
        }

        Doftemp.Unique();

        BaseType::mDofSet = Doftemp;

        //throws an exception if there are no Degrees of freedom involved in the analysis
        if (BaseType::mDofSet.size()==0)
            KRATOS_ERROR << "No degrees of freedom!";

        // If reactions are to be calculated, we check if all the dofs have reactions defined
        // This is to be done only in debug mode
#ifdef KRATOS_DEBUG
        if(BaseType::GetCalculateReactionsFlag())
        {
            for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
            {
                KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl
                    << "Node : "<<dof_iterator->Id()<< std::endl
                    << "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl;
            }
        }
#endif

        BaseType::mDofSetIsInitialized = true;

        KRATOS_CATCH("")
    }

    //**************************************************************************
    //**************************************************************************
    /**
    Assigns a globally consecutive equation id to every dof owned by this rank
    (PARTITION_INDEX == rank).  MPI_Scan provides this partition's offset and
    MPI_Allreduce the global system size; ghost dof ids are then synchronized
    via the communicator.  Also stores mFirstMyId/mLastMyId for the local row
    range.
    */
    void SetUpSystem(
        ModelPart& r_model_part
    ) override
    {
        // Set equation id for degrees of freedom
        int free_size = 0;
        //int fixed_size = 0;

        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD,&rank);

        // Calculating number of fixed and free dofs
        for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
            if(dof_iterator->GetSolutionStepValue(PARTITION_INDEX) == rank)
            {
                free_size++;
            }

        // Calculating the total size and required offset
        //int fixed_offset;
        int free_offset;
        int global_size;

        // The corresponding offset by the sum of the sizes in ranks below this one
        MPI_Scan(&free_size, &free_offset, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

        // The total size by the sum of all sizes in all ranks
        MPI_Allreduce(&free_size, &global_size, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

        // finding the offset for the beginning of the partition
        // (MPI_Scan is inclusive, so subtract our own count)
        free_offset -= free_size;

        // Now setting the equation id with the computed offset
        for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
            if(dof_iterator->GetSolutionStepValue(PARTITION_INDEX) == rank)
            {
                dof_iterator->SetEquationId(free_offset++);
                // std::cout << rank << " : set eq. id for dof " << dof_iterator->Id() << " to " << dof_iterator->EquationId() << std::endl;
            }

        BaseType::mEquationSystemSize = global_size;
        mLocalSystemSize = free_size;
        if(BaseType::GetEchoLevel()>0){
            std::cout << rank << " : BaseType::mEquationSystemSize = " << BaseType::mEquationSystemSize << std::endl;
            std::cout << rank << " : mLocalSystemSize = " << mLocalSystemSize << std::endl;
            std::cout << rank << " : free_offset = " << free_offset << std::endl;
            //std::cout << rank << " : fixed_offset = " << fixed_offset << std::endl;
        }

        //by Riccardo ... it may be wrong!
        mFirstMyId = free_offset-mLocalSystemSize;
        mLastMyId = mFirstMyId+mLocalSystemSize;

        r_model_part.GetCommunicator().SynchronizeDofs();
    }

    /**
    Exchanges the equation ids of interface dofs with every neighbour
    partition via MPI_Sendrecv and writes the received ids into the matching
    ghost-node dofs.
    NOTE(review): the size-consistency checks (position > buffer size) run
    AFTER the buffers have been filled/read, so an underestimated size would
    already have overflowed; also the raw new[] buffers leak if an exception
    is thrown between allocation and delete[].  Worth hardening.
    */
    void UpdateGhostDofs(ModelPart& rThisModelPart)
    {
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        // std::cout << rank << " : Starting UpdateGhostDofs...." << std::endl;

        //int source=rank;
        int destination=0;

        // vector<int>& neighbours_indices = rThisModelPart[NEIGHBOURS_INDICES];
        vector<int>& neighbours_indices = rThisModelPart.GetCommunicator().NeighbourIndices();

        // std::cout << rank << " starting domain loop " << std::endl;
        for(unsigned int i_domain = 0 ; i_domain < neighbours_indices.size() ; i_domain++)
            if((destination = neighbours_indices[i_domain]) >= 0)
            {
                // std::cout << rank << " domain #" << i_domain << std::endl;
                unsigned int send_buffer_size = 0;
                unsigned int receive_buffer_size = 0;

                // std::cout << rank;
                // KRATOS_WATCH(destination);
                // Calculating send and received buffer size
                // The interface meshes are stored after all, local and ghost meshes
                NodesArrayType& r_interface_nodes = rThisModelPart.GetCommunicator().LocalMesh(i_domain).Nodes();
                NodesArrayType& r_ghost_nodes = rThisModelPart.GetCommunicator().GhostMesh().Nodes();

                // std::cout << rank << " : 2...." << std::endl;
                for(typename NodesArrayType::iterator i_node = r_interface_nodes.begin(); i_node != r_interface_nodes.end(); ++i_node)
                    send_buffer_size += i_node->GetDofs().size();

                // std::cout << rank << " : 3...." << std::endl;
                for(typename NodesArrayType::iterator i_node = r_ghost_nodes.begin(); i_node != r_ghost_nodes.end(); ++i_node)
                    if(i_node->GetSolutionStepValue(PARTITION_INDEX) == destination)
                    {
                        receive_buffer_size += i_node->GetDofs().size();
                    }
                unsigned int position = 0;
                int* send_buffer = new int[send_buffer_size];
                int* receive_buffer = new int[receive_buffer_size];

                // Filling the buffer
                std::cout << rank << " : Filling the buffer...." << std::endl;
                for(ModelPart::NodeIterator i_node = r_interface_nodes.begin(); i_node != r_interface_nodes.end(); ++i_node)
                    for(ModelPart::NodeType::DofsContainerType::iterator i_dof = i_node->GetDofs().begin() ; i_dof != i_node->GetDofs().end() ; i_dof++)
                    {
                        send_buffer[position++] = i_dof->EquationId();
                    }

                MPI_Status status;

                if(position > send_buffer_size)
                    std::cout << rank << " Error in estimating send buffer size...." << std::endl;

                int send_tag = 1;//i_domain;
                int receive_tag = 1;//i_domain;

                MPI_Sendrecv (send_buffer, send_buffer_size, MPI_INT, destination, send_tag, receive_buffer, receive_buffer_size, MPI_INT, destination, receive_tag, MPI_COMM_WORLD, &status);

                // std::cout << rank << " : Send and receive Finished" << std::endl;

                // Updating nodes
                position = 0;
                for(ModelPart::NodeIterator i_node = rThisModelPart.GetCommunicator().GhostMesh().NodesBegin() ;
                        i_node != rThisModelPart.GetCommunicator().GhostMesh().NodesEnd() ; i_node++)
                    // for(ModelPart::NodeIterator i_node = rThisModelPart.NodesBegin(ModelPart::Kratos_Ghost) ;
                    //         i_node != rThisModelPart.NodesEnd(ModelPart::Kratos_Ghost) ; i_node++)
                    if(i_node->GetSolutionStepValue(PARTITION_INDEX) == destination)
                        for(ModelPart::NodeType::DofsContainerType::iterator i_dof = i_node->GetDofs().begin() ; i_dof != i_node->GetDofs().end() ; i_dof++)
                        {
                            i_dof->SetEquationId(receive_buffer[position++]);
                        }

                if(position > receive_buffer_size)
                    std::cout << rank << " Error in estimating receive buffer size...." << std::endl;

                delete [] send_buffer;
                delete [] receive_buffer;
            }
    }

    //**************************************************************************
    //**************************************************************************
    /**
    Builds the Epetra row map and sparsity graph (from element/condition
    equation ids) and allocates A, Dx, b and the reactions vector on it.
    Re-allocation only happens when the matrix is uninitialized or reshaping
    is requested; otherwise a size mismatch is treated as a hard error.
    */
    void ResizeAndInitializeVectors(
        typename TSchemeType::Pointer pScheme,
        TSystemMatrixPointerType& pA,
        TSystemVectorPointerType& pDx,
        TSystemVectorPointerType& pb,
        ModelPart& rModelPart
    ) override
    {
        KRATOS_TRY
        //~ std::cout << "entering ResizeAndInitializeVectors" << std::endl;

        //resizing the system vectors and matrix
        if ( pA == NULL || TSparseSpace::Size1(*pA) == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
        {
            //creating a work array
            unsigned int number_of_local_dofs = mLastMyId - mFirstMyId;

            int temp_size = number_of_local_dofs;
            if(temp_size <1000) temp_size = 1000;
            int* temp = new int[temp_size];

            auto& rElements = rModelPart.Elements();
            auto& rConditions = rModelPart.Conditions();

            //generate map - use the "temp" array here
            for(unsigned int i=0; i!=number_of_local_dofs; i++)
                temp[i] = mFirstMyId+i;
            Epetra_Map my_map(-1, number_of_local_dofs, temp, 0, mrComm);

            //create and fill the graph of the matrix --> the temp array is reused here with a different meaning
            Epetra_FECrsGraph Agraph(Copy, my_map, mguess_row_size);

            Element::EquationIdVectorType EquationId;
            ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();

            // assemble all elements
            for (typename ElementsArrayType::ptr_iterator it=rElements.ptr_begin(); it!=rElements.ptr_end(); ++it)
            {
                pScheme->EquationId(*it, EquationId, CurrentProcessInfo);

                //filling the list of active global indices (non fixed)
                unsigned int num_active_indices = 0;
                for(unsigned int i=0; i<EquationId.size(); i++)
                {
                    temp[num_active_indices] = EquationId[i];
                    num_active_indices += 1;
                }

                if(num_active_indices != 0)
                {
                    int ierr = Agraph.InsertGlobalIndices(num_active_indices,temp,num_active_indices, temp);
                    KRATOS_ERROR_IF( ierr < 0 ) << "In " << __FILE__ << ":" << __LINE__ << ": Epetra failure in Graph.InsertGlobalIndices. Error code: " << ierr << std::endl;
                }
            }

            // assemble all conditions
            for (typename ConditionsArrayType::ptr_iterator it=rConditions.ptr_begin(); it!=rConditions.ptr_end(); ++it)
            {
                pScheme->Condition_EquationId(*it, EquationId, CurrentProcessInfo);

                //filling the list of active global indices (non fixed)
                unsigned int num_active_indices = 0;
                for(unsigned int i=0; i<EquationId.size(); i++)
                {
                    temp[num_active_indices] = EquationId[i];
                    num_active_indices += 1;
                }

                if(num_active_indices != 0)
                {
                    int ierr = Agraph.InsertGlobalIndices(num_active_indices,temp,num_active_indices, temp);
                    KRATOS_ERROR_IF( ierr < 0 ) << "In " << __FILE__ << ":" << __LINE__ << ": Epetra failure in Graph.InsertGlobalIndices. Error code: " << ierr << std::endl;
                }
            }

            //finalizing graph construction
            int ierr = Agraph.GlobalAssemble();
            KRATOS_ERROR_IF( ierr != 0 ) << "In " << __FILE__ << ":" << __LINE__ << ": Epetra failure in Graph.GlobalAssemble, Error code: " << ierr << std::endl;

            //generate a new matrix pointer according to this graph
            TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(Copy,Agraph) );
            pA.swap(pNewA);

            //generate new vector pointers according to the given map
            if( pb == NULL || TSparseSpace::Size(*pb) != BaseType::mEquationSystemSize)
            {
                TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(my_map) );
                pb.swap(pNewb);
            }
            if( pDx == NULL || TSparseSpace::Size(*pDx) != BaseType::mEquationSystemSize)
            {
                TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(my_map) );
                pDx.swap(pNewDx);
            }
            if( BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix
            {
                TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(my_map) );
                BaseType::mpReactionsVector.swap(pNewReactionsVector);
            }

            delete [] temp;
        }
        else if (BaseType::mpReactionsVector == nullptr && this->mCalculateReactionsFlag)
        {
            TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(pDx->Map()) );
            BaseType::mpReactionsVector.swap(pNewReactionsVector);
        }
        else
        {
            if(TSparseSpace::Size1(*pA) == 0 || TSparseSpace::Size1(*pA) != BaseType::mEquationSystemSize || TSparseSpace::Size2(*pA) != BaseType::mEquationSystemSize)
            {
                KRATOS_ERROR << "It should not come here resizing is not allowed this way!!!!!!!! ... ";
            }
        }

        //if needed resize the vector for the calculation of reactions
        //         if(BaseType::mCalculateReactionsFlag == true)
        //         {
        //             KRATOS_THROW_ERROR(std::logic_error,"calculation of reactions not yet implemented with Trilinos","");
        //         }
        //~ std::cout << "finished ResizeAndInitializeVectors" << std::endl;
        KRATOS_CATCH("")
    }

    //**************************************************************************
    //**************************************************************************
    /** No per-step initialization is needed by this builder and solver. */
    void InitializeSolutionStep(
        ModelPart& r_model_part,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b) override
    {
        KRATOS_TRY
        KRATOS_CATCH("")
    }

    //**************************************************************************
    //**************************************************************************
    /** No per-step finalization is needed by this builder and solver. */
    void FinalizeSolutionStep(
        ModelPart& r_model_part,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b) override
    {
    }

    //**************************************************************************
    //**************************************************************************
    /**
    Recomputes the RHS and stores -RHS as the reaction value of every dof.
    An Epetra importer gathers the (possibly off-processor) RHS entries for
    the dofs known to this rank before the values are written back.
    */
    void CalculateReactions(
        typename TSchemeType::Pointer pScheme,
        ModelPart& r_model_part,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b) override
    {
        TSparseSpace::SetToZero(b);

        //refresh RHS to have the correct reactions
        BuildRHS(pScheme, r_model_part, b);

        //initialize the Epetra importer
        // TODO: this part of the code has been pasted until a better solution is found
        int system_size = TSparseSpace::Size(b);
        int number_of_dofs = BaseType::mDofSet.size();
        std::vector< int > index_array(number_of_dofs);

        //filling the array with the global ids
        int counter = 0;
        for(typename DofsArrayType::iterator i_dof = BaseType::mDofSet.begin(); i_dof != BaseType::mDofSet.end(); ++i_dof)
        {
            int id = i_dof->EquationId();
            if( id < system_size )
            {
                index_array[counter] = id;
                counter += 1;
            }
        }

        std::sort(index_array.begin(),index_array.end());
        std::vector<int>::iterator NewEnd = std::unique(index_array.begin(),index_array.end());
        index_array.resize(NewEnd-index_array.begin());

        int check_size = -1;
        int tot_update_dofs = index_array.size();
        b.Comm().SumAll(&tot_update_dofs,&check_size,1);
        if ( (check_size < system_size) && (b.Comm().MyPID() == 0) )
        {
            KRATOS_ERROR << "Dof count is not correct. There are less dofs than expected.\n" << "Expected number of active dofs = " << system_size << " dofs found = " << check_size ;
        }

        //defining a map as needed
        Epetra_Map dof_update_map(-1,index_array.size(), &(*(index_array.begin())),0,b.Comm() );

        //defining the importer class
        Kratos::shared_ptr<Epetra_Import> pDofImporter = Kratos::make_shared<Epetra_Import>(dof_update_map,b.Map());

        //defining a temporary vector to gather all of the values needed
        Epetra_Vector temp_RHS(pDofImporter->TargetMap());

        //importing in the new temp_RHS vector the values
        int ierr = temp_RHS.Import(b, *pDofImporter, Insert);
        if(ierr != 0) KRATOS_ERROR << "Epetra failure found - error code: " << ierr;

        double* temp_RHS_values; //DO NOT make delete of this one!!
        temp_RHS.ExtractView(&temp_RHS_values);

        b.Comm().Barrier();

        const int ndofs = static_cast<int>(BaseType::mDofSet.size());

        // store the RHS values in the reaction variable
        //NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
        #pragma omp parallel for firstprivate(ndofs)
        for (int k = 0; k<ndofs; k++)
        {
            typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + k;

            const int i = (dof_iterator)->EquationId();
            // (dof_iterator)->GetSolutionStepReactionValue() = -(*b[i]);
            const double react_val = temp_RHS[pDofImporter->TargetMap().LID(i)];
            (dof_iterator->GetSolutionStepReactionValue()) = -react_val;
        }
    }

    /** Not available for the Trilinos block builder and solver: always throws. */
    void BuildLHS_CompleteOnFreeRows(
        typename TSchemeType::Pointer pScheme,
        ModelPart& r_model_part,
        TSystemMatrixType& A) override
    {
        KRATOS_ERROR << "method BuildLHS_CompleteOnFreeRows not implemented in Trilinos Builder And Solver";
    }

    //**************************************************************************
    //**************************************************************************
    /**
    Applies Dirichlet boundary conditions: marks each dof as fixed/free and
    builds an Epetra importer over the column map to propagate that flag.
    (Body continues beyond this excerpt.)
    */
    void ApplyDirichletConditions(
        typename TSchemeType::Pointer pScheme,
        ModelPart& r_model_part,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b) override
    {
        KRATOS_TRY

        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD,&rank);

        //loop over all dofs to find the fixed ones
        std::vector<int> global_ids(BaseType::mDofSet.size());
        std::vector<int> is_dirichlet(BaseType::mDofSet.size());

        unsigned int i=0;
        for (typename DofsArrayType::iterator dof_it = BaseType::mDofSet.begin(); dof_it != BaseType::mDofSet.end(); ++dof_it)
        {
            const int global_id = dof_it->EquationId();
            global_ids[i] = global_id;

            if( dof_it->IsFixed() )
                is_dirichlet[i] = 1;
            else
                is_dirichlet[i] = 0;

            i++;
        }

        //here we construct and fill a vector "fixed local" which cont
        Epetra_Map localmap( -1, global_ids.size(), global_ids.data(), 0, A.Comm() );
        Epetra_IntVector fixed_local( Copy, localmap, is_dirichlet.data() );

        Epetra_Import dirichlet_importer(A.ColMap(), fixed_local.Map());

        //defining
a temporary vector to gather all of the values needed Epetra_IntVector fixed( A.ColMap() ); //importing in the new temp vector the values int ierr = fixed.Import(fixed_local,dirichlet_importer,Insert); if(ierr != 0) KRATOS_ERROR << "Epetra failure found"; /* //now fill the local bitarray employed to store the dirichlet rows and cols in local numeration //dirichlet_rows will be numbered according to A.RowMap() //dirichlet_cols will be numbered according to A.ColMap() std::vector< int > mdirichlet_rows( A.NumMyRows()); std::vector< int > mdirichlet_cols( fixed.MyLength() ); KRATOS_WATCH(mdirichlet_rows.size()) unsigned int counter = 0; for(unsigned int i=0; i<mdirichlet_rows.size(); i++) { int lid = localmap.LID( A.RowMap().GID(i) ); if(lid < 0) KRATOS_THROW_ERROR(std::runtime_error," a negative lid was found",""); if( fixed_local[lid] == 0) mdirichlet_rows[i] = false; else { mdirichlet_rows[i] = true; counter++; } } KRATOS_WATCH(counter); for(unsigned int i=0; i< mdirichlet_cols.size(); i++) { if(fixed[i] == 0) mdirichlet_cols[i] = false; else mdirichlet_cols[i] = true; } */ // KRATOS_WATCH(A.NumMyRows()) for (int i=0; i < A.NumMyRows(); i++) { int numEntries; // number of non-zero entries double *vals; // row non-zero values int *cols; // column indices of row non-zero values A.ExtractMyRowView(i,numEntries,vals,cols); int row_gid = A.RowMap().GID(i); int row_lid = localmap.LID(row_gid); if( fixed_local[row_lid] == 0 ) //not a dirichlet row { for (int j=0; j < numEntries; j++) { if(fixed[ cols[j] ] == true) vals[j] = 0.0; } } else //this IS a dirichlet row { //set to zero the rhs b[0][i] = 0.0; //note that the index of i is expected to be coherent with the rows of A //set to zero the whole row for (int j=0; j < numEntries; j++) { int col_gid = A.ColMap().GID(cols[j]); if (col_gid != row_gid) vals[j] = 0.0; } } } // // for (int i=0; i < A.NumMyRows(); i++) { // int numEntries; // double *vals; // int *cols; // A.ExtractMyRowView(i,numEntries,vals,cols); // // int 
row_gid = A.RowMap().GID(i); // int row_lid = dofmap.LID( row_gid ); // // if(row_lid < 0) // KRATOS_WATCH("not working :-("); // // if(fixed[row_lid] == 0) //not a dirichlet Row // { // for (int j=0; j < numEntries; j++) // { // const int col_gid = A.ColMap().GID( cols[j] ); // const int col_lid = dofmap.LID( col_gid ); // if(col_lid < 0) // std::cout << " pid="<<A.Comm().MyPID() << " cols[j] = " << cols[j] << " gid= " << col_gid << " lid=" << col_lid << std::endl; // // if(fixed[ col_lid ] > 0) vals[j] = 0.0; // } // } // else //this IS a dirichlet row // { // //set to zero the rhs // b[0][i] = 0.0; //note that the index of i is expected to be coherent with the rows of A // // //set to zero the whole row except the diag // for (int j=0; j < numEntries; j++) // { // const int col_gid = A.ColMap().GID( cols[j] ); // const int col_lid = dofmap.LID( col_gid ); // if(col_gid == row_gid) // vals[j] = 1; // else // vals[j] = 0; // } // } // } //std::cout << "finished modifying A for dirichlet" << std::endl; /* int NumEntries; // number of nonzero entries extracted std::vector<unsigned int> fixed_ids; fixed_ids.reserve(1000); for (typename DofsArrayType::iterator dof_it = BaseType::mDofSet.begin(); dof_it != BaseType::mDofSet.end(); ++dof_it) { if (dof_it->IsFixed()) fixed_ids.push_back(dof_it->EquationId()); if(dof_it->GetSolutionStepValue(PARTITION_INDEX) == rank) { if (dof_it->IsFixed()) { int GlobalRow = dof_it->EquationId(); // row to extract int Length = A.NumGlobalEntries(dof_it->EquationId()); // length of Values and Indices double* Values = new double[Length]; // extracted values for this row int* Indices = new int[Length]; // extracted global column indices for the corresponding values A.ExtractGlobalRowCopy(GlobalRow, Length, NumEntries, Values, Indices); // put 0.0 in each row A[ii] and 1.0 on the diagonal for (int ii=0; ii<Length; ii++) { if (Indices[ii] == GlobalRow) Values[ii]=1.0; else Values[ii]=0.0; } A.ReplaceGlobalValues(GlobalRow, Length, Values, 
Indices); // redo better !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! int* gb= new int[1]; gb[0]=GlobalRow; A.ReplaceGlobalValues(Length, Indices, 1, gb, Values); double* bb=new double[1]; bb[0]=0.0; // put 0.0 in vector b[GlobalRow] if GlobalRow is a fixed dof b.ReplaceGlobalValues(1,gb,bb); delete [] Values; delete [] Indices; delete [] gb; delete [] bb; } } } //now set the columns to zero for (typename DofsArrayType::iterator dof_it = BaseType::mDofSet.begin(); dof_it != BaseType::mDofSet.end(); ++dof_it) { if(dof_it->GetSolutionStepValue(PARTITION_INDEX) == rank) { if ( ! dof_it->IsFixed()) //NOT FIXED!! { int GlobalRow = dof_it->EquationId(); // row to extract int Length = A.NumGlobalEntries(dof_it->EquationId()); // length of Values and Indices double* Values = new double[Length]; // extracted values for this row int* Indices = new int[Length]; // extracted global column indices for the corresponding values A.ExtractGlobalRowCopy(GlobalRow, Length, NumEntries, Values, Indices); // put 0.0 in each row A[ii] and 1.0 on the diagonal for (int ii=0; ii<Length; ii++) { if ( std::find(fixed_ids.begin(), fixed_ids.end(), Indices[ii]) != fixed_ids.end() )//if the node is in the fixed list Values[ii]=0.0; } A.ReplaceGlobalValues(GlobalRow, Length, Values, Indices); delete [] Values; delete [] Indices; } } }*/ KRATOS_CATCH(""); } //************************************************************************** //************************************************************************** void ApplyPointLoads( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemVectorType& b) override {} /*@} */ /**@name Operations */ /*@{ */ /*@} */ /**@name Access */ /*@{ */ /*@} */ /**@name Inquiry */ /*@{ */ /*@} */ /**@name Friends */ /*@{ */ /*@} */ protected: /**@name Protected static Member Variables */ /*@{ */ /*@} */ /**@name Protected member Variables */ /*@{ */ Epetra_MpiComm& mrComm; int mguess_row_size; unsigned int mLocalSystemSize; int mFirstMyId; int mLastMyId; 
/*@} */ /**@name Protected Operators*/ /*@{ */ /*@} */ /**@name Protected Operations*/ /*@{ */ /*@} */ /**@name Protected Access */ /*@{ */ /*@} */ /**@name Protected Inquiry */ /*@{ */ /*@} */ /**@name Protected LifeCycle */ /*@{ */ /*@} */ private: /**@name Static Member Variables */ /*@{ */ /*@} */ /**@name Member Variables */ /*@{ */ /*@} */ /**@name Private Operators*/ /*@{ */ /*@} */ /**@name Private Operations*/ /*@{ */ //************************************************************************** void AssembleLHS_CompleteOnFreeRows( TSystemMatrixType& A, LocalSystemMatrixType& LHS_Contribution, Element::EquationIdVectorType& EquationId ) { KRATOS_ERROR << "This method is not implemented for Trilinos"; } /*@} */ /**@name Private Access */ /*@{ */ /*@} */ /**@name Private Inquiry */ /*@{ */ /*@} */ /**@name Un accessible methods */ /*@{ */ /*@} */ }; /* Class TrilinosBlockBuilderAndSolver */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ } /* namespace Kratos.*/ #endif /* KRATOS_TRILINOS_BLOCK_BUILDER_AND_SOLVER defined */
version1_2.c
// Compile with: // // // To specify the number of bodies in the world, the program optionally accepts // an integer as its first command line argument. #include <time.h> #include <sys/times.h> #include <math.h> #include <string.h> #include <stdlib.h> #include <stdio.h> #include <X11/Xlib.h> #include <unistd.h> #include "omp.h" #define WIDTH 1024 #define HEIGHT 768 // default number of bodies #define DEF_NUM_BODIES 200 // gravitational constant #define GRAV 10.0 // initial velocities are scaled by this value #define V_SCALAR 20.0 // initial masses are scaled by this value #define M_SCALAR 5.0 // radius scalar #define R_SCALAR 3 // coefficient of restitution determines the elasticity of a collision: C_REST = [0,1] // if C_REST = 0 -> perfectly inelastic (particles stick together) // if C_REST = 1 -> perfectly elastic (no loss of speed) #define C_REST 0.5 // set the iteration times #define iteration_times 100 // Must set 0 if run on Pi #define NOT_RUN_ON_PI 1 struct body { double x, y; // position double m; // mass }; struct speed{ double vx,vy; }; struct world { struct body *bodies; int num_bodies; }; struct speed* speed; double * radius; clock_t total_time = 0; //total_time.sec = 0; //total_time.usec = 0; /* This function initializes each particle's mass, velocity and position */ struct world *create_world(int num_bodies) { struct world *world = malloc(sizeof(struct world)); speed =malloc(sizeof(struct speed)*num_bodies); radius=malloc(sizeof(double)*num_bodies); world->num_bodies = num_bodies; world->bodies = malloc(sizeof(struct body) * num_bodies); int i = 0; double x; double y; double rc; int min_dim = (WIDTH < HEIGHT) ? 
WIDTH : HEIGHT; while (i < num_bodies) { x = drand48() * WIDTH; y = drand48() * HEIGHT; rc = sqrt((WIDTH / 2 - x) * (WIDTH / 2 - x) + (y - HEIGHT / 2) * (y - HEIGHT / 2)); if (rc <= min_dim / 2) { world->bodies[i].x = x; world->bodies[i].y = y; speed[i].vx = V_SCALAR * (y - HEIGHT / 2) / rc; speed[i].vy = V_SCALAR * (WIDTH / 2 - x) / rc; world->bodies[i].m = (1 / (0.025 + drand48())) * M_SCALAR; radius[i] = sqrt(world->bodies[i].m / M_PI) * R_SCALAR; i++; } } return world; } // set the foreground color given RGB values between 0..255. void set_color(Display *disp, GC gc, int r, int g, int b) { unsigned long int p; if (r < 0) r = 0; else if (r > 255) r = 255; if (g < 0) g = 0; else if (g > 255) g = 255; if (b < 0) b = 0; else if (b > 255) b = 255; p = (r << 16) | (g << 8) | (b); XSetForeground(disp, gc, p); } /* This function updates the screen with the new positions of each particle */ void draw_world(Display *disp, Pixmap back_buf, GC gc, struct world *world) { int i; double x, y, r, r2; // we turn off aliasing for faster draws set_color(disp, gc, 255, 255, 255); XFillRectangle(disp, back_buf, gc, 0, 0, WIDTH, HEIGHT); for (i = 0; i < world->num_bodies; i++) { r = radius[i]; x = world->bodies[i].x - r; y = world->bodies[i].y - r; r2 = r + r; // draw body set_color(disp, gc, 255 * 7 / 10, 255 * 7 / 10, 255 * 7 / 10); XFillArc(disp, back_buf, gc, x, y, r2, r2, 0, 360 * 64); set_color(disp, gc, 0, 0, 0); XDrawArc(disp, back_buf, gc, x, y, r2, r2, 0, 360 * 64); } } void collision_step(struct world *world) { int a, b; double r, x, y, vx, vy; // Impose screen boundaries by reversing direction if body is off screen for (a = 0; a < world->num_bodies; a++) { r = radius[a]; x = world->bodies[a].x; y = world->bodies[a].y; vx = speed[a].vx; vy = speed[a].vy; if (x - r < 0) { // left edge if (vx < 0) { speed[a].vx = -C_REST * vx; } world->bodies[a].x = r; } else if (x + r > WIDTH) { // right edge if (vx > 0) { speed[a].vx = -C_REST * vx; } world->bodies[a].x = WIDTH - r; } if 
(y - r < 0) { // bottom edge if (vy < 0) { speed[a].vy = -C_REST * vy; } world->bodies[a].y = r; } else if (y + r > HEIGHT) { // top edge if (vy > 0) { speed[a].vy = -C_REST * vy; } world->bodies[a].y = HEIGHT - r; } } } void position_step(struct world *world, double time_res) { /* The forces array stores the x and y components of the total force acting * on each body. The forces are index like this: * F on body i in the x dir = F_x[i] * F on body i in the y dir = F_y[i] */ double *force_x = (double *) malloc(sizeof(double) * world->num_bodies); double *force_y = (double *) malloc(sizeof(double) * world->num_bodies); // initialize all forces to zero force_x = memset(force_x, 0, sizeof(double) * world->num_bodies); force_y = memset(force_y, 0, sizeof(double) * world->num_bodies); /* Compute the net force on each body */ #pragma omp parallel for for (int i = 0; i < world->num_bodies; i++) { double d, d_cubed, diff_x, diff_y; for (int j = 0; j < world->num_bodies; j++) { if (i == j) { continue; } // Compute the x and y distances and total distance d between // bodies i and j diff_x = world->bodies[j].x - world->bodies[i].x; diff_y = world->bodies[j].y - world->bodies[i].y; d = sqrt((diff_x * diff_x) + (diff_y * diff_y)); if (d < 25) { d = 25; } d_cubed = d * d * d; // Add force due to j to total force on i force_x[i] += GRAV * (world->bodies[i].m * world->bodies[j].m / d_cubed) * diff_x; force_y[i] += GRAV * (world->bodies[i].m * world->bodies[j].m / d_cubed) * diff_y; } } #pragma omp barrier // Update the velocity and position of each body #pragma omp parallel for for (int i = 0; i < world->num_bodies; i++) { // Update velocities speed[i].vx += force_x[i] * time_res / world->bodies[i].m; speed[i].vy += force_y[i] * time_res / world->bodies[i].m; // Update positions world->bodies[i].x += speed[i].vx * time_res; world->bodies[i].y += speed[i].vy * time_res; } } void step_world(struct world *world, double time_res) { struct tms ttt; clock_t start, end; start = 
times(&ttt); position_step(world, time_res); end = times(&ttt); total_time += end - start; collision_step(world); } /* Main method runs initialize() and update() */ int main(int argc, char **argv) { //total_time.tv_sec = 0; //total_time.tv_usec = 0; /* get num bodies from the command line */ int num_bodies, threads; num_bodies = DEF_NUM_BODIES; threads = 1; if (argc == 2) { num_bodies = atoi(argv[1]); }; int thread_list[13]={1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 40}; FILE* fstream=fopen("outdata","a+"); fprintf(fstream,"Universe has %d bodies\n", num_bodies); for (int i = 0; i < 13; ++i) { threads = thread_list[i]; printf("Universe has %d bodies. %d Threads\n", num_bodies, threads); omp_set_num_threads(threads); /* set up the universe */ time_t cur_time; time(&cur_time); srand48((long) cur_time); // seed the RNG used in create_world struct world *world = create_world(num_bodies); /* set up graphics using Xlib */ #if NOT_RUN_ON_PI Display *disp = XOpenDisplay(NULL); int scr = DefaultScreen(disp); Window win = XCreateSimpleWindow( disp, RootWindow(disp, scr), 0, 0, WIDTH, HEIGHT, 0, BlackPixel(disp, scr), WhitePixel(disp, scr)); XStoreName(disp, win, "N-Body Simulator"); Pixmap back_buf = XCreatePixmap(disp, RootWindow(disp, scr), WIDTH, HEIGHT, DefaultDepth(disp, scr)); GC gc = XCreateGC(disp, back_buf, 0, 0); // Make sure we're only looking for messages about closing the window Atom del_window = XInternAtom(disp, "WM_DELETE_WINDOW", 0); XSetWMProtocols(disp, win, &del_window, 1); XSelectInput(disp, win, StructureNotifyMask); XMapWindow(disp, win); XEvent event; // wait until window is mapped while (1) { XNextEvent(disp, &event); if (event.type == MapNotify) { break; } } #endif struct timespec delay = {0, 1000000000 / 60}; // for 60 FPS struct timespec remaining; double delta_t = 0.1; int ii; total_time=0; for (ii = 0; ii < iteration_times; ii++) { // check if the window has been closed #if NOT_RUN_ON_PI if (XCheckTypedEvent(disp, ClientMessage, &event)) { break; } 
// we first draw to the back buffer then copy it to the front (`win`) draw_world(disp, back_buf, gc, world); XCopyArea(disp, back_buf, win, gc, 0, 0, WIDTH, HEIGHT, 0, 0); #endif step_world(world, delta_t); //if you want to watch the process in 60 FPS //nanosleep(&delay, &remaining); } // printf("Total Time = %f\n", (double)total_time.tv_sec + (double)total_time.tv_usec/1000000); fprintf(fstream,"%d %lfs\n", threads,(double) total_time / (sysconf(_SC_CLK_TCK))); #if NOT_RUN_ON_PI XFreeGC(disp, gc); XFreePixmap(disp, back_buf); XDestroyWindow(disp, win); XCloseDisplay(disp); #endif } fclose(fstream); return 0; }
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 32; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
Stencil_par2.c
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "malloc2D.h" #include "timer.h" int main(int argc, char *argv[]) { struct timespec tstart_cpu, tstop_cpu; double cpu_time; int imax=2002, jmax = 2002; int niter=1000, nburst=100; double** restrict x = malloc2D(jmax, imax); double** restrict xnew = malloc2D(jmax, imax); #pragma omp target enter data map(to:x[0:jmax][0:imax], xnew[0:jmax][0:imax]) #pragma omp target teams { #pragma omp distribute parallel for simd for (int j = 0; j < jmax; j++){ for (int i = 0; i < imax; i++){ xnew[j][i] = 0.0; x[j][i] = 5.0; } } #pragma omp distribute parallel for simd for (int j = jmax/2 - 5; j < jmax/2 + 5; j++){ for (int i = imax/2 - 5; i < imax/2 -1; i++){ x[j][i] = 400.0; } } } // omp target teams for (int iter = 0; iter < niter; iter+=nburst){ for (int ib = 0; ib < nburst; ib++){ cpu_timer_start(&tstart_cpu); #pragma omp target teams distribute parallel for simd for (int j = 1; j < jmax-1; j++){ for (int i = 1; i < imax-1; i++){ xnew[j][i] = ( x[j][i] + x[j][i-1] + x[j][i+1] + x[j-1][i] + x[j+1][i] )/5.0; } } #pragma omp target teams distribute parallel for simd for (int j = 0; j < jmax; j++){ for (int i = 0; i < imax; i++){ x[j][i] = xnew[j][i]; } } cpu_time += cpu_timer_stop(tstart_cpu); } printf("Iter %d\n",iter+nburst); } #pragma omp target exit data map(from:x[0:jmax][0:imax], xnew[0:jmax][0:imax]) free(x); free(xnew); printf("Timing is %lf\n",cpu_time); }
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 32; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
convolution_winograd_transform_pack4_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Winograd F(6x6, 3x3) input transform, pack4 bf16 storage:
// each 8x8 bf16 input tile (4 channels interleaved per element) is converted
// to fp32 and multiplied by B^T on both sides via the separable row/column
// passes below (first pass into tmp[][][], second pass into bottom_blob_tm).
static void conv3x3s1_winograd63_transform_input_pack4_bf16s_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    // 8x8 tiles overlap by 2 pixels, so each tile advances the output by 6.
    const int w_tiles = (w - 2) / 6;
    const int h_tiles = (h - 2) / 6;
    const int tiles = w_tiles * h_tiles;

    // const float itm[8][8] = {
    //     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
    //
    //     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
    //     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
    //     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
    //     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
    //
    //     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
    // };

    // 0 = r00 - r06 + (r04 - r02) * 5.25
    // 7 = r07 - r01 + (r03 - r05) * 5.25

    // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
    // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

    // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

    // reuse r04 * 1.25
    // reuse r03 * 2.5
    // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // Scratch for the row-pass result: [row-of-Bt][col][4 channels].
        float tmp[8][8][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const unsigned short* r0 = img0.row<const unsigned short>(i * 6) + (j * 6) * 4;

                // Pass 1: transform the 8 columns of each input row (bf16 -> fp32 loads).
                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0));
                    float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4));
                    float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8));
                    float32x4_t _r03 = vcvt_f32_bf16(vld1_u16(r0 + 12));
                    float32x4_t _r04 = vcvt_f32_bf16(vld1_u16(r0 + 16));
                    float32x4_t _r05 = vcvt_f32_bf16(vld1_u16(r0 + 20));
                    float32x4_t _r06 = vcvt_f32_bf16(vld1_u16(r0 + 24));
                    float32x4_t _r07 = vcvt_f32_bf16(vld1_u16(r0 + 28));

                    float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f);
                    float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f);

                    // Shared subexpressions for output rows 1/2, 3/4 and 5/6.
                    float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f);
                    float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f);

                    float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b);
                    float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b);

                    float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f);
                    float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);

                    float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b);
                    float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b);

                    float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f);
                    float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);

                    float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b);
                    float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);
                    vst1q_f32(tmp[5][m], _tmp5m);
                    vst1q_f32(tmp[6][m], _tmp6m);
                    vst1q_f32(tmp[7][m], _tmp7m);

                    r0 += w * 4;
                }

                // Output layout: 64 coefficient planes of `tiles` pack4 elements each.
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 8;
                float* r0_tm_3 = r0_tm_0 + tiles * 12;
                float* r0_tm_4 = r0_tm_0 + tiles * 16;
                float* r0_tm_5 = r0_tm_0 + tiles * 20;
                float* r0_tm_6 = r0_tm_0 + tiles * 24;
                float* r0_tm_7 = r0_tm_0 + tiles * 28;

                // Pass 2: same transform applied to the rows of tmp (fp32 output).
                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                    float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
                    float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);

                    float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f);
                    float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f);

                    float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f);
                    float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f);

                    float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b);
                    float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b);

                    float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
                    float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);

                    float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b);
                    float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b);

                    float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f);
                    float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);

                    float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b);
                    float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b);

                    vst1q_f32(r0_tm_0, _r0tm0);
                    vst1q_f32(r0_tm_1, _r0tm1);
                    vst1q_f32(r0_tm_2, _r0tm2);
                    vst1q_f32(r0_tm_3, _r0tm3);
                    vst1q_f32(r0_tm_4, _r0tm4);
                    vst1q_f32(r0_tm_5, _r0tm5);
                    vst1q_f32(r0_tm_6, _r0tm6);
                    vst1q_f32(r0_tm_7, _r0tm7);

                    // Jump to the next group of 8 coefficient planes (8 * tiles * 4 floats).
                    r0_tm_0 += tiles * 32;
                    r0_tm_1 += tiles * 32;
                    r0_tm_2 += tiles * 32;
                    r0_tm_3 += tiles * 32;
                    r0_tm_4 += tiles * 32;
                    r0_tm_5 += tiles * 32;
                    r0_tm_6 += tiles * 32;
                    r0_tm_7 += tiles * 32;
                }
            }
        }
    }
}

// Winograd F(6x6, 3x3) output transform, pack4 bf16 storage:
// folds each 8x8 block of winograd-domain accumulators back to a 6x6 spatial
// output tile, adds the per-channel bias, and stores the result as bf16.
static void conv3x3s1_winograd63_transform_output_pack4_bf16s_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 6;
    const int h_tiles = outh / 6;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[6][8] = {
    //     {1.0f, 1.0f,  1.0f,  1.0f,  1.0f, 32.0f, 32.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f,  2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f,  4.0f,  4.0f,  8.0f,  8.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f,  8.0f, -8.0f,  4.0f, -4.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f, 16.0f, 16.0f,  2.0f,  2.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
    // };

    // 0 = r0 + (r1 + r2) + (r3 + r4)     + (r5 + r6) * 32
    // 1 =      (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
    // 2 =      (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
    // 3 =      (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
    // 4 =      (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
    // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        // Bias for the 4 channels of this pack; zero when no bias is supplied.
        float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);

        // Scratch for the column pass: [row-of-At][col][4 channels].
        float tmp[6][8][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // 8 coefficient planes per group, `tiles` pack4 elements apart.
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 8;
                const float* output0_tm_3 = output0_tm_0 + tiles * 12;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16;
                const float* output0_tm_5 = output0_tm_0 + tiles * 20;
                const float* output0_tm_6 = output0_tm_0 + tiles * 24;
                const float* output0_tm_7 = output0_tm_0 + tiles * 28;

                unsigned short* output0 = out0.row<unsigned short>(i * 6) + (j * 6) * 4;

                // Pass 1: collapse the 8 winograd rows to 6 rows in tmp.
                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
                    float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
                    float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
                    float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
                    float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
                    float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);
                    float32x4_t _out0tm6 = vld1q_f32(output0_tm_6);
                    float32x4_t _out0tm7 = vld1q_f32(output0_tm_7);

                    // Even rows use the sums, odd rows the differences.
                    float32x4_t _tmp024a = vaddq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp135a = vsubq_f32(_out0tm1, _out0tm2);

                    float32x4_t _tmp024b = vaddq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp135b = vsubq_f32(_out0tm3, _out0tm4);

                    float32x4_t _tmp024c = vaddq_f32(_out0tm5, _out0tm6);
                    float32x4_t _tmp135c = vsubq_f32(_out0tm5, _out0tm6);

                    float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f));
                    float32x4_t _tmp2m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
                    float32x4_t _tmp4m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);

                    float32x4_t _tmp1m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
                    float32x4_t _tmp3m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
                    float32x4_t _tmp5m = vaddq_f32(vaddq_f32(_out0tm7, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f));

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);
                    vst1q_f32(tmp[5][m], _tmp5m);

                    output0_tm_0 += tiles * 32;
                    output0_tm_1 += tiles * 32;
                    output0_tm_2 += tiles * 32;
                    output0_tm_3 += tiles * 32;
                    output0_tm_4 += tiles * 32;
                    output0_tm_5 += tiles * 32;
                    output0_tm_6 += tiles * 32;
                    output0_tm_7 += tiles * 32;
                }

                // Pass 2: collapse the 8 columns to 6, add bias, store as bf16.
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                    float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
                    float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);

                    float32x4_t _tmp024a = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp135a = vsubq_f32(_tmp01, _tmp02);

                    float32x4_t _tmp024b = vaddq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp135b = vsubq_f32(_tmp03, _tmp04);

                    float32x4_t _tmp024c = vaddq_f32(_tmp05, _tmp06);
                    float32x4_t _tmp135c = vsubq_f32(_tmp05, _tmp06);

                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)));
                    float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
                    float32x4_t _out04 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));

                    float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
                    float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
                    float32x4_t _out05 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp07, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f)));

                    vst1_u16(output0, vcvt_bf16_f32(_out00));
                    vst1_u16(output0 + 4, vcvt_bf16_f32(_out01));
                    vst1_u16(output0 + 8, vcvt_bf16_f32(_out02));
                    vst1_u16(output0 + 12, vcvt_bf16_f32(_out03));
                    vst1_u16(output0 + 16, vcvt_bf16_f32(_out04));
                    vst1_u16(output0 + 20, vcvt_bf16_f32(_out05));

                    output0 += outw * 4;
                }
            }
        }
    }
}

// Winograd F(4x4, 3x3) input transform, pack4 bf16 storage:
// 6x6 input tiles (stride 4, 2-pixel overlap) -> 36 coefficient planes.
static void conv3x3s1_winograd43_transform_input_pack4_bf16s_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 4;
    const int h_tiles = (h - 2) / 4;
    const int tiles = w_tiles * h_tiles;

    // const float itm[6][6] = {
    //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
    //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
    //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
    //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f,  0.0f,-5.0f, 0.0f, 1.0f}
    // };

    // 0 =  4 * r00 - 5 * r02 + r04
    // 1 = -4 * (r01 + r02) + r04 + r03
    // 2 =  4 * (r01 - r02) + r04 - r03
    // 3 = -2 * (r01 - r03) + r04 - r02
    // 4 =  2 * (r01 - r03) + r04 - r02
    // 5 =  4 * r01 - 5 * r03 + r05

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        float tmp[6][6][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const unsigned short* r0 = img0.row<const unsigned short>(i * 4) + (j * 4) * 4;

                // Pass 1: per-row transform into tmp.
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0));
                    float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4));
                    float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8));
                    float32x4_t _r03 = vcvt_f32_bf16(vld1_u16(r0 + 12));
                    float32x4_t _r04 = vcvt_f32_bf16(vld1_u16(r0 + 16));
                    float32x4_t _r05 = vcvt_f32_bf16(vld1_u16(r0 + 20));

                    float32x4_t _tmp0m = vmlsq_n_f32(vmlaq_n_f32(_r04, _r00, 4.f), _r02, 5.f);
                    float32x4_t _tmp1m = vmlsq_n_f32(vaddq_f32(_r04, _r03), vaddq_f32(_r01, _r02), 4.f);
                    float32x4_t _tmp2m = vmlaq_n_f32(vsubq_f32(_r04, _r03), vsubq_f32(_r01, _r02), 4.f);
                    float32x4_t _tmp3m = vmlsq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                    float32x4_t _tmp4m = vmlaq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                    float32x4_t _tmp5m = vmlsq_n_f32(vmlaq_n_f32(_r05, _r01, 4.f), _r03, 5.f);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);
                    vst1q_f32(tmp[5][m], _tmp5m);

                    r0 += w * 4;
                }

                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 8;
                float* r0_tm_3 = r0_tm_0 + tiles * 12;
                float* r0_tm_4 = r0_tm_0 + tiles * 16;
                float* r0_tm_5 = r0_tm_0 + tiles * 20;

                // Pass 2: per-column transform into the coefficient planes.
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);

                    float32x4_t _r0tm0 = vmlsq_n_f32(vmlaq_n_f32(_tmp04, _tmp00, 4.f), _tmp02, 5.f);
                    float32x4_t _r0tm1 = vmlsq_n_f32(vaddq_f32(_tmp04, _tmp03), vaddq_f32(_tmp01, _tmp02), 4.f);
                    float32x4_t _r0tm2 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp03), vsubq_f32(_tmp01, _tmp02), 4.f);
                    float32x4_t _r0tm3 = vmlsq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
                    float32x4_t _r0tm4 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
                    float32x4_t _r0tm5 = vmlsq_n_f32(vmlaq_n_f32(_tmp05, _tmp01, 4.f), _tmp03, 5.f);

                    vst1q_f32(r0_tm_0, _r0tm0);
                    vst1q_f32(r0_tm_1, _r0tm1);
                    vst1q_f32(r0_tm_2, _r0tm2);
                    vst1q_f32(r0_tm_3, _r0tm3);
                    vst1q_f32(r0_tm_4, _r0tm4);
                    vst1q_f32(r0_tm_5, _r0tm5);

                    r0_tm_0 += tiles * 24;
                    r0_tm_1 += tiles * 24;
                    r0_tm_2 += tiles * 24;
                    r0_tm_3 += tiles * 24;
                    r0_tm_4 += tiles * 24;
                    r0_tm_5 += tiles * 24;
                }
            }
        }
    }
}

// Winograd F(4x4, 3x3) output transform, pack4 bf16 storage:
// 6x6 winograd-domain blocks -> 4x4 spatial tiles + bias, stored as bf16.
static void conv3x3s1_winograd43_transform_output_pack4_bf16s_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 4;
    const int h_tiles = outh / 4;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[4][6] = {
    //     {1.0f, 1.0f,  1.0f, 1.0f,  1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f, 4.0f,  4.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
    // };

    // 0 = r00 + (r01 + r02) + (r03 + r04)
    // 1 =       (r01 - r02) + (r03 - r04) * 2
    // 2 =       (r01 + r02) + (r03 + r04) * 4
    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);

        float tmp[4][6][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 8;
                const float* output0_tm_3 = output0_tm_0 + tiles * 12;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16;
                const float* output0_tm_5 = output0_tm_0 + tiles * 20;

                unsigned short* output0 = out0.row<unsigned short>(i * 4) + (j * 4) * 4;

                // Pass 1: collapse 6 winograd rows to 4 rows in tmp.
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
                    float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
                    float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
                    float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
                    float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
                    float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);

                    float32x4_t _tmp02a = vaddq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp13a = vsubq_f32(_out0tm1, _out0tm2);

                    float32x4_t _tmp02b = vaddq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp13b = vsubq_f32(_out0tm3, _out0tm4);

                    float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp02a), _tmp02b);
                    float32x4_t _tmp1m = vmlaq_n_f32(_tmp13a, _tmp13b, 2.f);
                    float32x4_t _tmp2m = vmlaq_n_f32(_tmp02a, _tmp02b, 4.f);
                    float32x4_t _tmp3m = vmlaq_n_f32(vaddq_f32(_out0tm5, _tmp13a), _tmp13b, 8.f);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);

                    output0_tm_0 += tiles * 24;
                    output0_tm_1 += tiles * 24;
                    output0_tm_2 += tiles * 24;
                    output0_tm_3 += tiles * 24;
                    output0_tm_4 += tiles * 24;
                    output0_tm_5 += tiles * 24;
                }

                // Pass 2: collapse 6 columns to 4, add bias, store as bf16.
                for (int m = 0; m < 4; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);

                    float32x4_t _tmp02a = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp13a = vsubq_f32(_tmp01, _tmp02);

                    float32x4_t _tmp02b = vaddq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp13b = vsubq_f32(_tmp03, _tmp04);

                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp02a), _tmp02b));
                    float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp13a, _tmp13b, 2.f));
                    float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp02a, _tmp02b, 4.f));
                    float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vaddq_f32(_tmp05, _tmp13a), _tmp13b, 8.f));

                    vst1_u16(output0, vcvt_bf16_f32(_out00));
                    vst1_u16(output0 + 4, vcvt_bf16_f32(_out01));
                    vst1_u16(output0 + 8, vcvt_bf16_f32(_out02));
                    vst1_u16(output0 + 12, vcvt_bf16_f32(_out03));

                    output0 += outw * 4;
                }
            }
        }
    }
}

// Winograd F(2x2, 3x3) input transform, pack4 bf16 storage:
// 4x4 input tiles (stride 2, 2-pixel overlap) -> 16 coefficient planes.
static void conv3x3s1_winograd23_transform_input_pack4_bf16s_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 2;
    const int h_tiles = (h - 2) / 2;
    const int tiles = w_tiles * h_tiles;

    // const float itm[4][4] = {
    //     {1.0f,  0.0f, -1.0f, 0.0f},
    //     {0.0f,  1.0f,  1.00f, 0.0f},
    //     {0.0f, -1.0f,  1.00f, 0.0f},
    //     {0.0f, -1.0f,  0.00f, 1.0f}
    // };

    // 0 = r00 - r02
    // 1 = r01 + r02
    // 2 = r02 - r01
    // 3 = r03 - r01

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        float tmp[4][4][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const unsigned short* r0 = img0.row<const unsigned short>(i * 2) + (j * 2) * 4;

                // Pass 1: per-row transform (only adds/subs for F(2x2,3x3)).
                for (int m = 0; m < 4; m++)
                {
                    float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0));
                    float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4));
                    float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8));
                    float32x4_t _r03 = vcvt_f32_bf16(vld1_u16(r0 + 12));

                    float32x4_t _tmp0m = vsubq_f32(_r00, _r02);
                    float32x4_t _tmp1m = vaddq_f32(_r01, _r02);
                    float32x4_t _tmp2m = vsubq_f32(_r02, _r01);
                    float32x4_t _tmp3m = vsubq_f32(_r03, _r01);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);

                    r0 += w * 4;
                }

                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 8;
                float* r0_tm_3 = r0_tm_0 + tiles * 12;

                // Pass 2: per-column transform into the coefficient planes.
                for (int m = 0; m < 4; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);

                    float32x4_t _r0tm0 = vsubq_f32(_tmp00, _tmp02);
                    float32x4_t _r0tm1 = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _r0tm2 = vsubq_f32(_tmp02, _tmp01);
                    float32x4_t _r0tm3 = vsubq_f32(_tmp03, _tmp01);

                    vst1q_f32(r0_tm_0, _r0tm0);
                    vst1q_f32(r0_tm_1, _r0tm1);
                    vst1q_f32(r0_tm_2, _r0tm2);
                    vst1q_f32(r0_tm_3, _r0tm3);

                    r0_tm_0 += tiles * 16;
                    r0_tm_1 += tiles * 16;
                    r0_tm_2 += tiles * 16;
                    r0_tm_3 += tiles * 16;
                }
            }
        }
    }
}

// Winograd F(2x2, 3x3) output transform, pack4 bf16 storage:
// 4x4 winograd-domain blocks -> 2x2 spatial tiles + bias, stored as bf16.
static void conv3x3s1_winograd23_transform_output_pack4_bf16s_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 2;
    const int h_tiles = outh / 2;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[2][4] = {
    //     {1.0f, 1.0f,  1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 1.0f}
    // };

    // 0 = r00 + r01 + r02
    // 1 = r01 - r02 + r03

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);

        float tmp[2][4][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 8;
                const float* output0_tm_3 = output0_tm_0 + tiles * 12;

                unsigned short* output0 = out0.row<unsigned short>(i * 2) + (j * 2) * 4;

                // Pass 1: collapse 4 winograd rows to 2 rows in tmp.
                for (int m = 0; m < 4; m++)
                {
                    float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
                    float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
                    float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
                    float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);

                    float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _out0tm1), _out0tm2);
                    float32x4_t _tmp1m = vaddq_f32(vsubq_f32(_out0tm1, _out0tm2), _out0tm3);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);

                    output0_tm_0 += tiles * 16;
                    output0_tm_1 += tiles * 16;
                    output0_tm_2 += tiles * 16;
                    output0_tm_3 += tiles * 16;
                }

                // Pass 2: collapse 4 columns to 2, add bias, store as bf16.
                for (int m = 0; m < 2; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);

                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp01), _tmp02));
                    float32x4_t _out01 = vaddq_f32(_bias0, vaddq_f32(vsubq_f32(_tmp01, _tmp02), _tmp03));

                    vst1_u16(output0, vcvt_bf16_f32(_out00));
                    vst1_u16(output0 + 4, vcvt_bf16_f32(_out01));

                    output0 += outw * 4;
                }
            }
        }
    }
}
QED_AEG.h
#pragma once
#include "Constants.h"
#include "Ensemble.h"
#include "Grid.h"
#include "Pusher.h"
#include "synchrotron.h"

#include <omp.h>
#include <random>

using namespace pfc;
using namespace constants;

// Monte-Carlo QED event generator ("adaptive event generator") coupled to a
// Boris particle pusher: photons may convert to e-/e+ pairs and charged
// particles may emit photons. When the per-step event probability is large
// the step is sub-cycled as a local avalanche (RunAvalanche); when it is very
// small, rare-event sampling is used via the Factor rescaling.
// Units follow the Constants<FP> helpers (presumably CGS — confirm upstream).
class ScalarQED_AEG_only_electron : public ScalarParticlePusher
{
public:
    ScalarQED_AEG_only_electron()
    {
        // Probability window: events below MinProbability are subsampled,
        // events above MaxProbability trigger avalanche sub-stepping.
        MinProbability = 5e-4;
        MaxProbability = 0.01;
        // Schwinger (critical) field built from m, c, e, hbar.
        SchwingerField = sqr(Constants<FP>::electronMass() * Constants<FP>::lightVelocity()) *
            Constants<FP>::lightVelocity() / (-Constants<FP>::electronCharge() * Constants<FP>::planck());
        // Common prefactor of the emission/pair-production rates: e^2 m c / hbar^2.
        preFactor = sqr(Constants<FP>::electronCharge()) * Constants<FP>::electronMass() *
            Constants<FP>::lightVelocity() / sqr(Constants<FP>::planck());
        // Process switches: photon emission on, pair production off by default.
        coeffPhoton_probability = 1.0;
        coeffPair_probability = 0.0;
        distribution = std::uniform_real_distribution<FP>(0.0, 1.0);
        int max_threads;
#ifdef __USE_OMP__
        max_threads = omp_get_max_threads();
#else
        max_threads = 1;
#endif
        // Per-thread scratch buffers so parallel loops never share containers.
        AvalanchePhotons.resize(max_threads);
        AvalancheParticles.resize(max_threads);
        afterAvalanchePhotons.resize(max_threads);
        afterAvalancheParticles.resize(max_threads);
    }

    // Advance the whole ensemble N time steps, updating the grid clock each step.
    void processParticlesNIter(Ensemble3d* particles, pyYeeGrid* grid, FP timeStep, FP startTime, int N)
    {
        for (int i = 0; i < N; i++)
        {
            grid->setTime(startTime + i * timeStep);
            processParticles(particles, grid, timeStep);
        }
    }

    // One time step: clear per-thread buffers, process each species, then
    // append all newly created particles/photons to the ensemble (serial).
    void processParticles(Ensemble3d* particles, pyYeeGrid* grid, FP timeStep)
    {
        int max_threads;
#ifdef __USE_OMP__
        max_threads = omp_get_max_threads();
#else
        max_threads = 1;
#endif
        for (int th = 0; th < max_threads; th++)
        {
            AvalanchePhotons[th].clear();
            AvalancheParticles[th].clear();
            afterAvalanchePhotons[th].clear();
            afterAvalancheParticles[th].clear();
        }
        // Skip a species entirely when its process is switched off.
        if ((*particles)[Photon].size() && coeffPair_probability != 0)
            HandlePhotons((*particles)[Photon], grid, timeStep);
        if ((*particles)[Electron].size() && coeffPhoton_probability != 0)
            HandleParticles((*particles)[Electron], grid, timeStep);
        if ((*particles)[Positron].size() && coeffPhoton_probability != 0)
            HandleParticles((*particles)[Positron], grid, timeStep);

        for (int th = 0; th < max_threads; th++)
        {
            for (int ind = 0; ind < afterAvalanchePhotons[th].size(); ind++)
            {
                particles->addParticle(afterAvalanchePhotons[th][ind]);
            }
            for (int ind = 0; ind < afterAvalancheParticles[th].size(); ind++)
            {
                particles->addParticle(afterAvalancheParticles[th][ind]);
            }
        }
    }

    // Standard Boris rotation push: half electric kick, magnetic rotation,
    // half electric kick, then a position update.
    void Boris(Particle3d&& particle, const FP3 &e, const FP3 &b, FP timeStep)
    {
        FP eCoeff = timeStep * particle.getCharge() / (2 * particle.getMass()*Constants<FP>::lightVelocity());
        FP3 eMomentum = e * eCoeff;
        FP3 um = particle.getP() + eMomentum;
        FP3 t = b * eCoeff / sqrt((FP)1 + um.norm2());
        FP3 uprime = um + cross(um, t);
        FP3 s = t * (FP)2 / ((FP)1 + t.norm2());
        particle.setP(eMomentum + um + cross(uprime, s));
        particle.setPosition(particle.getPosition() + timeStep * particle.getVelocity());
    }

    // Proxy overload of the same Boris push (identical math).
    void Boris(ParticleProxy3d&& particle, const FP3 &e, const FP3 &b, FP timeStep)
    {
        FP eCoeff = timeStep * particle.getCharge() / (2 * particle.getMass()*Constants<FP>::lightVelocity());
        FP3 eMomentum = e * eCoeff;
        FP3 um = particle.getP() + eMomentum;
        FP3 t = b * eCoeff / sqrt((FP)1 + um.norm2());
        FP3 uprime = um + cross(um, t);
        FP3 s = t * (FP)2 / ((FP)1 + t.norm2());
        particle.setP(eMomentum + um + cross(uprime, s));
        particle.setPosition(particle.getPosition() + timeStep * particle.getVelocity());
    }

    // Propagate photons ballistically and sample pair production.
    void HandlePhotons(ParticleArray3d& particles, YeeGrid* grid, FP timeStep)
    {
        FP dt = timeStep;
#pragma omp parallel for schedule(dynamic, 1)
        for (int i = 0; i < particles.size(); i++)
        {
            int thread_id;
#ifdef __USE_OMP__
            thread_id = omp_get_thread_num();
#else
            thread_id = 0;
#endif
            FP3 pPos = particles[i].getPosition();
            FP3 k = particles[i].getVelocity();
            FP3 e, b;
            e = grid->getE(pPos);
            b = grid->getB(pPos);
            k = (1 / k.norm()) * k; // normalized wave vector
            // Free flight at c along k.
            particles[i].setPosition(pPos + dt * Constants<FP>::lightVelocity() * k);
            // Effective field for a photon.
            // NOTE(review): the sqrt argument is not clamped here, unlike the
            // particle path in HandleParticles which clamps at 0 — confirm it
            // cannot go negative for photons.
            FP H_eff = sqrt(sqr(e + VP(k, b)) - sqr(SP(e, k)));
            FP HE = H_eff / SchwingerField;
            // Photon "gamma": energy in units of m c^2.
            FP pGamma = particles[i].getMomentum().norm() / (Constants<FP>::electronMass() * Constants<FP>::lightVelocity());
            FP EstimatedProbability = dt * estimatedPhotons(HE, pGamma);
            FP Factor = 1;
            if (EstimatedProbability < MinProbability)
            {
                // Rare-event subsampling: only a fraction of photons are
                // tested, with the rate boosted by Factor to compensate.
                FP r0 = random_number_omp();
                if (r0 > EstimatedProbability / MinProbability)
                    continue;
                else
                    Factor = MinProbability / EstimatedProbability;
            }
            if (EstimatedProbability < MaxProbability)
            {
                //=======handle single event========
                double gamma = pGamma;
                double chi = gamma * H_eff / SchwingerField;
                // delta = fraction of the photon momentum given to the electron.
                double delta = Pair_Generator(Factor, chi, gamma, dt);
                if (delta != 0)
                {
                    Particle3d NewParticle;
                    NewParticle.setType(Electron);
                    NewParticle.setWeight(particles[i].getWeight());
                    NewParticle.setPosition(particles[i].getPosition());
                    NewParticle.setMomentum(delta*particles[i].getMomentum());
                    afterAvalancheParticles[thread_id].push_back(NewParticle);
                    NewParticle.setType(Positron);
                    NewParticle.setMomentum((1 - delta)*particles[i].getMomentum());
                    afterAvalancheParticles[thread_id].push_back(NewParticle);
                    //deletePhoton
                }
            }
            else
            {
                //=======handle avalanche========
                // Probability too high for a single event per step: seed a
                // cascade and sub-step it with a locally reduced dt.
                AvalancheParticles[thread_id].clear();
                AvalanchePhotons[thread_id].clear();
                AvalanchePhotons[thread_id].push_back(particles[i]);
                particles[i].setPosition(particles[i].getPosition() - dt * Constants<FP>::lightVelocity() * k); // go back
                RunAvalanche(H_eff, e, b, Photon, pGamma, dt);
                //deletePhoton
                for (int k = 0; k != AvalanchePhotons[thread_id].size(); k++)
                    afterAvalanchePhotons[thread_id].push_back(AvalanchePhotons[thread_id][k]);
                for (int k = 0; k != AvalancheParticles[thread_id].size(); k++)
                    afterAvalancheParticles[thread_id].push_back(AvalancheParticles[thread_id][k]);
            }
        }
    }

    // Push electrons/positrons with Boris and sample photon emission.
    void HandleParticles(ParticleArray3d& particles, YeeGrid* grid, FP timeStep)
    {
        FP dt = timeStep;
#pragma omp parallel for schedule(dynamic, 1)
        for (int i = 0; i < particles.size(); i++)
        {
            int thread_id;
#ifdef __USE_OMP__
            thread_id = omp_get_thread_num();
#else
            thread_id = 0;
#endif
            FP3 pPos = particles[i].getPosition();
            FP3 v = particles[i].getVelocity();
            FP3 e, b;
            e = grid->getE(pPos);
            b = grid->getB(pPos);
            // Effective (transverse) field seen by the particle; clamped at 0
            // before the square root to guard against rounding.
            FP H_eff = sqr(e + (1 / Constants<FP>::lightVelocity())*VP(v, b)) - sqr(SP(e, v) / Constants<FP>::lightVelocity());
            if (H_eff < 0) H_eff = 0;
            H_eff = sqrt(H_eff);
            FP pGamma = particles[i].getGamma();
            FP HE = H_eff / SchwingerField;
            FP EstimatedProbability = dt * estimatedParticles(HE, pGamma);
            FP Factor = 1;
            if (EstimatedProbability < MinProbability)
            {
                // Rare-event subsampling; non-selected particles are still pushed.
                FP r0 = random_number_omp();
                if (r0 > EstimatedProbability / MinProbability)
                {
                    Boris(particles[i], e, b, dt);
                    continue;
                }
                else
                    Factor = MinProbability / EstimatedProbability;
            }
            if (EstimatedProbability < MaxProbability)
            {
                //=======handle single event========
                double gamma = pGamma;
                double chi = gamma * H_eff / SchwingerField;
                // delta = fraction of the particle momentum carried by the photon.
                double delta = Photon_MGenerator(Factor, chi, gamma, dt);
                if (delta != 0)
                {
                    Particle3d NewParticle;
                    NewParticle.setType(Photon);
                    NewParticle.setWeight(particles[i].getWeight());
                    NewParticle.setPosition(particles[i].getPosition());
                    NewParticle.setMomentum(delta*particles[i].getMomentum());
                    afterAvalanchePhotons[thread_id].push_back(NewParticle);
                    particles[i].setMomentum((1 - delta)*particles[i].getMomentum());
                }
                Boris(particles[i], e, b, dt);
            }
            else
            {
                //=======handle avalanche========
                AvalancheParticles[thread_id].clear();
                AvalanchePhotons[thread_id].clear();
                AvalancheParticles[thread_id].push_back(particles[i]);
                RunAvalanche(H_eff, e, b, particles[i].getType(), pGamma, dt);
                for (int k = 0; k != AvalanchePhotons[thread_id].size(); k++)
                    afterAvalanchePhotons[thread_id].push_back(AvalanchePhotons[thread_id][k]);
                // Slot 0 is the seed particle itself: copy its final state back.
                particles[i].setMomentum(AvalancheParticles[thread_id][0].getMomentum());
                particles[i].setPosition(AvalancheParticles[thread_id][0].getPosition());
                for (int k = 1; k != AvalancheParticles[thread_id].size(); k++)
                    afterAvalancheParticles[thread_id].push_back(AvalancheParticles[thread_id][k]);
            }
        }
    }

    // Sub-cycle a QED cascade for one macro time step dt. The local step is
    // chosen so the per-substep event probability stays at MaxProbability.
    // Fields E and B are frozen at the seed position for the whole cascade.
    void RunAvalanche(double H_eff_global, const FP3 &E, const FP3 &B, int SeedType, double gamma, double dt)
    {
        int thread_id;
#ifdef __USE_OMP__
        thread_id = omp_get_thread_num();
#else
        thread_id = 0;
#endif
        vector<Particle3d>& AvalancheParticles = this->AvalancheParticles[thread_id];
        vector<Particle3d>& AvalanchePhotons = this->AvalanchePhotons[thread_id];

        gamma = max(gamma, 1.0);
        FP HE = H_eff_global / SchwingerField;
        FP sub_dt = MaxProbability / estimatedParticles(HE, gamma);
        int NT = 1 + int(dt / sub_dt);
        sub_dt = dt / FP(NT);
        for (int i = 0; i != NT; i++)
        {
            // Charged particles: Boris push, then possible photon emission.
            for (int k = 0; k != AvalancheParticles.size(); k++)
            {
                Boris(AvalancheParticles[k], E, B, sub_dt);
                FP3 v = AvalancheParticles[k].getVelocity();
                FP H_eff = sqr(E + (1 / Constants<FP>::lightVelocity())*VP(v, B)) - sqr(SP(E, v) / Constants<FP>::lightVelocity());
                if (H_eff < 0) H_eff = 0;
                H_eff = sqrt(H_eff);
                FP gamma = AvalancheParticles[k].getGamma();
                FP chi = gamma * H_eff / SchwingerField;
                FP delta = Photon_MGenerator(1, chi, gamma, sub_dt);
                if (delta != 0)
                {
                    Particle3d NewParticle;
                    NewParticle.setType(Photon);
                    NewParticle.setWeight(AvalancheParticles[k].getWeight());
                    NewParticle.setPosition(AvalancheParticles[k].getPosition());
                    NewParticle.setMomentum(delta*AvalancheParticles[k].getMomentum());
                    AvalanchePhotons.push_back(NewParticle);
                    AvalancheParticles[k].setMomentum((1 - delta)*AvalancheParticles[k].getMomentum());
                }
            }
            // Photons: free flight, then possible pair production; a photon
            // that converts is removed by swap-with-last (hence k--).
            for (int k = 0; k < AvalanchePhotons.size(); k++)
            {
                FP3 k_ = AvalanchePhotons[k].getVelocity();
                k_ = (1 / k_.norm()) * k_; // normalized wave vector
                AvalanchePhotons[k].setPosition(AvalanchePhotons[k].getPosition() + sub_dt * Constants<FP>::lightVelocity() * k_);
                FP H_eff = sqrt(sqr(E + VP(k_, B)) - sqr(SP(E, k_)));
                FP gamma = AvalanchePhotons[k].getMomentum().norm() / (Constants<FP>::electronMass() * Constants<FP>::lightVelocity());
                FP chi = gamma * H_eff / SchwingerField;
                FP delta = Pair_Generator(1, chi, gamma, sub_dt);
                if (delta != 0)
                {
                    Particle3d NewParticle;
                    NewParticle.setType(Electron);
                    NewParticle.setWeight(AvalanchePhotons[k].getWeight());
                    NewParticle.setPosition(AvalanchePhotons[k].getPosition());
                    NewParticle.setMomentum(delta*AvalanchePhotons[k].getMomentum());
                    AvalancheParticles.push_back(NewParticle);
                    NewParticle.setType(Positron);
                    NewParticle.setMomentum((1 - delta)*AvalanchePhotons[k].getMomentum());
                    AvalancheParticles.push_back(NewParticle);
                    AvalanchePhotons[k] = AvalanchePhotons[AvalanchePhotons.size() - 1];
                    AvalanchePhotons.pop_back();
                    k--;
                }
            }
        }
    }

    // Upper-bound rate estimate for pair production by a photon
    // (fit coefficients; gamma is unused for photons).
    FP estimatedPhotons(FP HE, FP gamma)
    {
        return (0.0827 * HE) * preFactor;
    }

    // Upper-bound rate estimate for photon emission by a charged particle.
    // Piecewise fit in b = (3/2) * chi; includes the photon term so the same
    // bound also covers a pair event within the avalanche sub-stepping.
    FP estimatedParticles(FP HE, FP gamma)
    {
        FP b = 3.0 / 2.0*HE * gamma;
        FP newFactor;
        if (b < 0.1)
        {
            newFactor = 0.962436 * b / gamma + 0.0827 * HE;
        }
        else if (b < 0.5)
        {
            newFactor = 0.779009 * pow(b, 11.0 / 12.0) / gamma + 0.0827 * HE;
        }
        else if (b < 10)
        {
            newFactor = 0.721193 * pow(b, 19.0 / 24.0) / gamma + 0.0827 * HE;
        }
        else
        {
            newFactor = 0.955556 * pow(b, 2.0 / 3.0) / gamma + 0.0827 * HE;
        }
        return newFactor * preFactor;
    }

    // Differential photon-emission probability at relative energy d;
    // z is the synchrotron argument, cut off at 700 to avoid over/underflow.
    FP Photon_probability(FP chi, FP gamma, FP d)
    {
        FP z = (2 / 3.0) * (1 / chi)*d / (1 - d);
        FP coeff = (sqrt(3.0) / (2.0*pi)) * coeffPhoton_probability;
        if ((z < 700) && (z > 0))
            return coeff * (chi / gamma) * ((1 - d) / d)*(synchrotron_1(z) + (3 / 2.0) * d * chi * z * synchrotron_2(z));
        else
            return 0;
    }

    // Differential pair-production probability at electron energy fraction d.
    FP Pair_probability(FP chi, FP gamma, FP d)
    {
        FP z_p = (2 / 3.0) / (chi*(1 - d) * d);
        FP coeff = (sqrt(3.0) / (2.0*pi)) * coeffPair_probability;
        if ((z_p < 700) && (z_p > 0))
            return coeff * (chi / gamma) * (d - 1) * d * (synchrotron_1(z_p) - (3 / 2.0) * chi * z_p * synchrotron_2(z_p));
        else
            return 0;
    }

    FP Pair_Generator(FP Factor, FP chi, FP gamma, FP dt) //returns photon energy in mc2gamma in case of generation.
    {
        // Rejection sampling: r1 is the candidate energy fraction, r2 the
        // acceptance draw against the (Factor-boosted) differential rate.
        FP factor = Factor * dt * preFactor;
        FP r1 = random_number_omp();
        FP r2 = random_number_omp();
        if (r2 < factor * Pair_probability(chi, gamma, r1))
            return r1;
        else
            return 0;
    }

    FP Photon_MGenerator(FP Factor, FP chi, FP gamma, FP dt) //Modified event generator: returns photon energy in mc2gamma in case of generation, !doesn't change gamma
    {
        // Importance sampling with r1 = r0^3 (concentrates candidates at low
        // energies); the 3*r0^2 Jacobian keeps the distribution unbiased.
        double r0 = random_number_omp();
        double r1 = r0 * r0 * r0;
        double r2 = random_number_omp();
        double factor = Factor * dt * preFactor;
        if (r2 < factor * Photon_probability(chi, gamma, r1) * 3 * r0*r0)
            return r1;
        else
            return 0;
    }

    // Single-particle pusher interface: intentionally a no-op here
    // (this class only operates on whole ensembles).
    void operator()(ParticleProxy3d& particle, ValueField field, FP timeStep)
    {}

    void operator()(Particle3d& particle, ValueField field, FP timeStep)
    {
        ParticleProxy3d particleProxy(particle);
        this->operator()(particleProxy, field, timeStep);
    }

private:

    // Draw a uniform [0,1) deviate from the shared engine.
    // NOTE(review): the omp critical serializes all threads on every draw and
    // a single engine gives run-to-run nondeterminism under OpenMP scheduling;
    // per-thread engines would remove the bottleneck.
    FP random_number_omp()
    {
        FP rand_n;
#pragma omp critical
        rand_n = distribution(rand_generator);
        return rand_n;
    }

    FP MinProbability, MaxProbability;  // event-probability window per step
    FP SchwingerField;                  // QED critical field
    FP preFactor;                       // common rate prefactor e^2 m c / hbar^2
    FP coeffPhoton_probability, coeffPair_probability;  // process on/off scalers
    std::default_random_engine rand_generator;
    std::uniform_real_distribution<FP> distribution;
    // Per-thread cascade scratch and per-thread output staging buffers.
    vector<vector<Particle3d>> AvalanchePhotons, AvalancheParticles;
    vector<vector<Particle3d>> afterAvalanchePhotons, afterAvalancheParticles;
};
quicksort.h
// -*- C++ -*-

// Copyright (C) 2007-2015 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.

// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file parallel/quicksort.h
 *  @brief Implementation of a unbalanced parallel quicksort (in-place).
 *  This file is a GNU parallel extension to the Standard C++ Library.
 */

// Written by Johannes Singler.

#ifndef _GLIBCXX_PARALLEL_QUICKSORT_H
#define _GLIBCXX_PARALLEL_QUICKSORT_H 1

#include <parallel/parallel.h>
#include <parallel/partition.h>

namespace __gnu_parallel
{
  /** @brief Unbalanced quicksort divide step.
   *  @param __begin Begin iterator of subsequence.
   *  @param __end End iterator of subsequence.
   *  @param __comp Comparator.
   *  @param __pivot_rank Desired __rank of the pivot.
   *  @param __num_samples Choose pivot from that many samples.
   *  @param __num_threads Number of threads that are allowed to work on
   *  this part.
   *  @return Split position: number of elements that ended up in the
   *  left (less-than-pivot) partition.
   */
  template<typename _RAIter, typename _Compare>
    typename std::iterator_traits<_RAIter>::difference_type
    __parallel_sort_qs_divide(_RAIter __begin, _RAIter __end,
                              _Compare __comp, typename std::iterator_traits
                              <_RAIter>::difference_type __pivot_rank,
                              typename std::iterator_traits
                              <_RAIter>::difference_type
                              __num_samples, _ThreadIndex __num_threads)
    {
      typedef std::iterator_traits<_RAIter> _TraitsType;
      typedef typename _TraitsType::value_type _ValueType;
      typedef typename _TraitsType::difference_type _DifferenceType;

      _DifferenceType __n = __end - __begin;
      // Never draw more samples than there are elements.
      __num_samples = std::min(__num_samples, __n);

      // Allocate uninitialized, to avoid default constructor.
      _ValueType* __samples = static_cast<_ValueType*>
        (::operator new(__num_samples * sizeof(_ValueType)));

      // Copy-construct evenly spaced samples from the input range into
      // the raw buffer via placement new.
      for (_DifferenceType __s = 0; __s < __num_samples; ++__s)
        {
          const unsigned long long __index = static_cast<unsigned long long>
            (__s) * __n / __num_samples;
          ::new(&(__samples[__s])) _ValueType(__begin[__index]);
        }

      // Sort the samples sequentially; they are few.
      __gnu_sequential::sort(__samples, __samples + __num_samples, __comp);

      // Map the desired pivot rank (in [0, __n)) onto the sample array.
      _ValueType& __pivot = __samples[__pivot_rank * __num_samples / __n];

      // Partition the whole range around the chosen pivot in parallel.
      __gnu_parallel::__binder2nd<_Compare, _ValueType, _ValueType, bool>
        __pred(__comp, __pivot);
      _DifferenceType __split = __parallel_partition(__begin, __end,
                                                     __pred, __num_threads);

      // Manually destroy the placement-new'd samples and release the
      // raw storage (mirrors the ::operator new above).
      for (_DifferenceType __s = 0; __s < __num_samples; ++__s)
        __samples[__s].~_ValueType();
      ::operator delete(__samples);

      return __split;
    }

  /** @brief Unbalanced quicksort conquer step.
   *  @param __begin Begin iterator of subsequence.
   *  @param __end End iterator of subsequence.
   *  @param __comp Comparator.
   *  @param __num_threads Number of threads that are allowed to work on
   *  this part.
   */
  template<typename _RAIter, typename _Compare>
    void
    __parallel_sort_qs_conquer(_RAIter __begin, _RAIter __end,
                               _Compare __comp,
                               _ThreadIndex __num_threads)
    {
      typedef std::iterator_traits<_RAIter> _TraitsType;
      typedef typename _TraitsType::value_type _ValueType;
      typedef typename _TraitsType::difference_type _DifferenceType;

      // Base case: out of parallelism, fall back to sequential sort.
      if (__num_threads <= 1)
        {
          __gnu_sequential::sort(__begin, __end, __comp);
          return;
        }

      _DifferenceType __n = __end - __begin, __pivot_rank;

      if (__n <= 1)
        return;

      // Split the thread budget roughly in half (left side gets the
      // extra thread when the count is odd).
      _ThreadIndex __num_threads_left;

      if ((__num_threads % 2) == 1)
        __num_threads_left = __num_threads / 2 + 1;
      else
        __num_threads_left = __num_threads / 2;

      // Choose the pivot rank proportional to the left thread share, so
      // that work per thread is balanced on both sides.
      __pivot_rank = __n * __num_threads_left / __num_threads;

      _DifferenceType __split = __parallel_sort_qs_divide
        (__begin, __end, __comp, __pivot_rank,
         _Settings::get().sort_qs_num_samples_preset, __num_threads);

      // Recurse on both partitions concurrently; each recursion carries
      // its share of the thread budget.
#pragma omp parallel sections num_threads(2)
      {
#pragma omp section
        __parallel_sort_qs_conquer(__begin, __begin + __split,
                                   __comp, __num_threads_left);
#pragma omp section
        __parallel_sort_qs_conquer(__begin + __split, __end,
                                   __comp, __num_threads - __num_threads_left);
      }
    }

  /** @brief Unbalanced quicksort main call.
   *  @param __begin Begin iterator of input sequence.
   *  @param __end End iterator input sequence, ignored.
   *  @param __comp Comparator.
   *  @param __num_threads Number of threads that are allowed to work on
   *  this part.
   */
  template<typename _RAIter, typename _Compare>
    void
    __parallel_sort_qs(_RAIter __begin, _RAIter __end,
                       _Compare __comp,
                       _ThreadIndex __num_threads)
    {
      // NOTE(review): __n is referenced here before its declaration below;
      // this only compiles because _GLIBCXX_CALL typically expands to
      // nothing outside of debug/verbose builds — confirm before enabling
      // such builds.
      _GLIBCXX_CALL(__n)

      typedef std::iterator_traits<_RAIter> _TraitsType;
      typedef typename _TraitsType::value_type _ValueType;
      typedef typename _TraitsType::difference_type _DifferenceType;

      _DifferenceType __n = __end - __begin;

      // At least one element per processor.
      if (__num_threads > __n)
        __num_threads = static_cast<_ThreadIndex>(__n);

      __parallel_sort_qs_conquer(
        __begin, __begin + __n, __comp, __num_threads);
    }

} //namespace __gnu_parallel

#endif /* _GLIBCXX_PARALLEL_QUICKSORT_H */
weighted_sptree_inl.h
/* * * Copyright (c) 2014, Laurens van der Maaten (Delft University of Technology) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the Delft University of Technology. * 4. Neither the name of the Delft University of Technology nor the names of * its contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY LAURENS VAN DER MAATEN ''AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL LAURENS VAN DER MAATEN BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * */ /* * * Copyright (c) 2014, Nicola Pezzotti (Delft University of Technology) * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the Delft University of Technology. * 4. Neither the name of the Delft University of Technology nor the names of * its contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY NICOLA PEZZOTTI ''AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL NICOLA PEZZOTTI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * */ #ifndef WEIGHTED_SPTREE_INL #define WEIGHTED_SPTREE_INL #include <math.h> #include <float.h> #include <stdlib.h> #include <stdio.h> #include <cmath> #include "weighted_sptree.h" #include <math.h> #include <algorithm> namespace hdi{ namespace dr{ //! 
Constructs cell template <typename scalar_type> WeightedSPTree<scalar_type>::Cell::Cell(unsigned int emb_dimension) { _emb_dimension = emb_dimension; corner = (hp_scalar_type*) malloc(_emb_dimension * sizeof(hp_scalar_type)); width = (hp_scalar_type*) malloc(_emb_dimension * sizeof(hp_scalar_type)); } template <typename scalar_type> WeightedSPTree<scalar_type>::Cell::Cell(unsigned int emb_dimension, hp_scalar_type* inp_corner, hp_scalar_type* inp_width) { _emb_dimension = emb_dimension; corner = (hp_scalar_type*) malloc(_emb_dimension * sizeof(hp_scalar_type)); width = (hp_scalar_type*) malloc(_emb_dimension * sizeof(hp_scalar_type)); for(int d = 0; d < _emb_dimension; d++) setCorner(d, inp_corner[d]); for(int d = 0; d < _emb_dimension; d++) setWidth( d, inp_width[d]); } //! Destructs cell template <typename scalar_type> WeightedSPTree<scalar_type>::Cell::~Cell() { free(corner); free(width); } template <typename scalar_type> typename WeightedSPTree<scalar_type>::hp_scalar_type WeightedSPTree<scalar_type>::Cell::getCorner(unsigned int d) { return corner[d]; } template <typename scalar_type> typename WeightedSPTree<scalar_type>::hp_scalar_type WeightedSPTree<scalar_type>::Cell::getWidth(unsigned int d) { return width[d]; } template <typename scalar_type> void WeightedSPTree<scalar_type>::Cell::setCorner(unsigned int d, hp_scalar_type val) { corner[d] = val; } template <typename scalar_type> void WeightedSPTree<scalar_type>::Cell::setWidth(unsigned int d, hp_scalar_type val) { width[d] = val; } // Checks whether a point lies in a cell template <typename scalar_type> bool WeightedSPTree<scalar_type>::Cell::containsPoint(scalar_type point[]) { for(int d = 0; d < _emb_dimension; d++) { if(corner[d] - width[d] > point[d]) return false; if(corner[d] + width[d] < point[d]) return false; } return true; } ///////////////////////////////////////////////////////////////////////////////////////////// //! Default constructor for WeightedSPTree -- build tree, too! 
template <typename scalar_type> WeightedSPTree<scalar_type>::WeightedSPTree(unsigned int D, scalar_type* inp_data, const scalar_type* weights, unsigned int N){ // Compute mean, width, and height of current map (boundaries of WeightedSPTree) hp_scalar_type* mean_Y = (hp_scalar_type*) malloc(D * sizeof(hp_scalar_type)); for(unsigned int d = 0; d < D; d++) mean_Y[d] = .0; hp_scalar_type* min_Y = (hp_scalar_type*) malloc(D * sizeof(hp_scalar_type)); for(unsigned int d = 0; d < D; d++) min_Y[d] = DBL_MAX; hp_scalar_type* max_Y = (hp_scalar_type*) malloc(D * sizeof(hp_scalar_type)); for(unsigned int d = 0; d < D; d++) max_Y[d] = -DBL_MAX; for(unsigned int n = 0; n < N; n++) { for(unsigned int d = 0; d < D; d++) { mean_Y[d] += inp_data[n * D + d]; if(inp_data[n * D + d] < min_Y[d]) min_Y[d] = inp_data[n * D + d]; if(inp_data[n * D + d] > max_Y[d]) max_Y[d] = inp_data[n * D + d]; } } for(int d = 0; d < D; d++) mean_Y[d] /= (hp_scalar_type) N; // Construct WeightedSPTree hp_scalar_type* width = (hp_scalar_type*) malloc(D * sizeof(hp_scalar_type)); for(int d = 0; d < D; d++) //width[d] = fmax(max_Y[d] - mean_Y[d], mean_Y[d] - min_Y[d]) + 1e-5; //C++11 width[d] = std::max(max_Y[d] - mean_Y[d], mean_Y[d] - min_Y[d]) + 1e-5; init(NULL, D, inp_data, weights, mean_Y, width); fill(N); // Clean up memory free(mean_Y); free(max_Y); free(min_Y); free(width); } //! Constructor for WeightedSPTree with particular size and parent -- build the tree, too! template <typename scalar_type> WeightedSPTree<scalar_type>::WeightedSPTree(unsigned int D, scalar_type* inp_data, const scalar_type* weights, unsigned int N, hp_scalar_type* inp_corner, hp_scalar_type* inp_width){ init(NULL, D, inp_data, weights, inp_corner, inp_width); fill(N); } //! 
Constructor for WeightedSPTree with particular size (do not fill the tree) template <typename scalar_type> WeightedSPTree<scalar_type>::WeightedSPTree(unsigned int D, scalar_type* inp_data, const scalar_type* weights, hp_scalar_type* inp_corner, hp_scalar_type* inp_width){ init(NULL, D, inp_data, weights, inp_corner, inp_width); } //! Constructor for WeightedSPTree with particular size and parent (do not fill tree) template <typename scalar_type> WeightedSPTree<scalar_type>::WeightedSPTree(WeightedSPTree* inp_parent, unsigned int D, scalar_type* inp_data, const scalar_type* weights, hp_scalar_type* inp_corner, hp_scalar_type* inp_width){ init(inp_parent, D, inp_data, weights, inp_corner, inp_width); } //! Constructor for WeightedSPTree with particular size and parent -- build the tree, too! template <typename scalar_type> WeightedSPTree<scalar_type>::WeightedSPTree(WeightedSPTree* inp_parent, unsigned int D, scalar_type* inp_data, const scalar_type* weights, unsigned int N, hp_scalar_type* inp_corner, hp_scalar_type* inp_width){ init(inp_parent, D, inp_data, weights, inp_corner, inp_width); fill(N); } //! 
Main initialization function template <typename scalar_type> void WeightedSPTree<scalar_type>::init(WeightedSPTree* inp_parent, unsigned int D, scalar_type* inp_data, const scalar_type* weights, hp_scalar_type* inp_corner, hp_scalar_type* inp_width){ parent = inp_parent; _emb_dimension = D; no_children = 2; for(unsigned int d = 1; d < D; d++){ no_children *= 2; } _emb_positions = inp_data; _weights = weights; is_leaf = true; size = 0; cum_size = 0; boundary = new Cell(_emb_dimension); for(unsigned int d = 0; d < D; d++){ boundary->setCorner(d, inp_corner[d]); } for(unsigned int d = 0; d < D; d++){ boundary->setWidth( d, inp_width[d]); } children = (WeightedSPTree**) malloc(no_children * sizeof(WeightedSPTree*)); for(unsigned int i = 0; i < no_children; i++){ children[i] = NULL; } _center_of_mass = (hp_scalar_type*) malloc(D * sizeof(hp_scalar_type)); for(unsigned int d = 0; d < D; d++){ _center_of_mass[d] = .0; } } // Destructor for WeightedSPTree template <typename scalar_type> WeightedSPTree<scalar_type>::~WeightedSPTree() { for(unsigned int i = 0; i < no_children; i++) { if(children[i] != NULL) delete children[i]; } free(children); free(_center_of_mass); //free(buff); delete boundary; } // Update the _emb_positions underlying this tree template <typename scalar_type> void WeightedSPTree<scalar_type>::setData(scalar_type* inp_data, const scalar_type* weights) { _emb_positions = inp_data; } // Get the parent of the current tree template <typename scalar_type> WeightedSPTree<scalar_type>* WeightedSPTree<scalar_type>::getParent() { return parent; } // Insert a point into the WeightedSPTree template <typename scalar_type> bool WeightedSPTree<scalar_type>::insert(unsigned int new_index) { //#pragma critical { // Ignore objects which do not belong in this quad tree scalar_type* point = _emb_positions + new_index * _emb_dimension; if(!boundary->containsPoint(point)) return false; // Online update of cumulative size and center-of-mass //cum_size++; //hp_scalar_type mult1 
= (hp_scalar_type) (cum_size - 1) / (hp_scalar_type) cum_size; //hp_scalar_type mult2 = 1.0 / (hp_scalar_type) cum_size; cum_size += _weights[new_index]; hp_scalar_type mult1 = (hp_scalar_type) (cum_size - _weights[new_index]) / (hp_scalar_type) cum_size; hp_scalar_type mult2 = _weights[new_index] / (hp_scalar_type) cum_size; for(unsigned int d = 0; d < _emb_dimension; d++){ _center_of_mass[d] *= mult1; } for(unsigned int d = 0; d < _emb_dimension; d++){ _center_of_mass[d] += mult2 * point[d]; } // If there is space in this quad tree and it is a leaf, add the object here if(is_leaf && size < QT_NODE_CAPACITY) { index[size] = new_index; size++; return true; } // Don't add duplicates for now (this is not very nice) bool any_duplicate = false; for(unsigned int n = 0; n < size; n++) { bool duplicate = true; for(unsigned int d = 0; d < _emb_dimension; d++) { if(point[d] != _emb_positions[index[n] * _emb_dimension + d]) { duplicate = false; break; } } any_duplicate = any_duplicate | duplicate; } if(any_duplicate) return true; // Otherwise, we need to subdivide the current cell if(is_leaf){ subdivide(); } // Find out where the point can be inserted for(unsigned int i = 0; i < no_children; i++) { if(children[i]->insert(new_index)) return true; } // Otherwise, the point cannot be inserted (this should never happen) return false; } } // Create four children which fully divide this cell into four quads of equal area template <typename scalar_type> void WeightedSPTree<scalar_type>::subdivide() { // Create new children hp_scalar_type* new_corner = (hp_scalar_type*) malloc(_emb_dimension * sizeof(hp_scalar_type)); hp_scalar_type* new_width = (hp_scalar_type*) malloc(_emb_dimension * sizeof(hp_scalar_type)); for(unsigned int i = 0; i < no_children; i++) { unsigned int div = 1; for(unsigned int d = 0; d < _emb_dimension; d++) { new_width[d] = .5 * boundary->getWidth(d); if((i / div) % 2 == 1) new_corner[d] = boundary->getCorner(d) - .5 * boundary->getWidth(d); else new_corner[d] = 
boundary->getCorner(d) + .5 * boundary->getWidth(d); div *= 2; } children[i] = new WeightedSPTree(this, _emb_dimension, _emb_positions, _weights, new_corner, new_width); } free(new_corner); free(new_width); // Move existing points to correct children for(unsigned int i = 0; i < size; i++) { bool success = false; for(unsigned int j = 0; j < no_children; j++) { if(!success) success = children[j]->insert(index[i]); } index[i] = -1; } // Empty parent node size = 0; is_leaf = false; } // Build WeightedSPTree on dataset template <typename scalar_type> void WeightedSPTree<scalar_type>::fill(unsigned int N) { int i = 0; //#pragma omp parallel for for(i = 0; i < N; i++) insert(i); } // Checks whether the specified tree is correct template <typename scalar_type> bool WeightedSPTree<scalar_type>::isCorrect() { for(unsigned int n = 0; n < size; n++) { scalar_type* point = _emb_positions + index[n] * _emb_dimension; if(!boundary->containsPoint(point)) return false; } if(!is_leaf) { bool correct = true; for(int i = 0; i < no_children; i++) correct = correct && children[i]->isCorrect(); return correct; } else return true; } // Build a list of all indices in WeightedSPTree template <typename scalar_type> void WeightedSPTree<scalar_type>::getAllIndices(unsigned int* indices) { getAllIndices(indices, 0); } // Build a list of all indices in WeightedSPTree template <typename scalar_type> unsigned int WeightedSPTree<scalar_type>::getAllIndices(unsigned int* indices, unsigned int loc) { // Gather indices in current quadrant for(unsigned int i = 0; i < size; i++) indices[loc + i] = index[i]; loc += size; // Gather indices in children if(!is_leaf) { for(int i = 0; i < no_children; i++) loc = children[i]->getAllIndices(indices, loc); } return loc; } template <typename scalar_type> unsigned int WeightedSPTree<scalar_type>::getDepth() { if(is_leaf) return 1; unsigned int depth = 0; for(unsigned int i = 0; i < no_children; i++) //depth = fmax(depth, children[i]->getDepth()); //C++11 depth = 
std::max(depth, children[i]->getDepth()); return 1 + depth; } // Compute non-edge forces using Barnes-Hut algorithm template <typename scalar_type> void WeightedSPTree<scalar_type>::computeNonEdgeForces(unsigned int point_index, hp_scalar_type theta, hp_scalar_type neg_f[], hp_scalar_type& sum_Q)const { std::vector<hp_scalar_type> distance(_emb_dimension,0); // Make sure that we spend no time on empty nodes or self-interactions if(cum_size == 0 || (is_leaf && size == 1 && index[0] == point_index)){ return; } // Compute distance between point and center-of-mass hp_scalar_type distance_squared = .0; unsigned int ind = point_index * _emb_dimension; for(unsigned int d = 0; d < _emb_dimension; d++){ distance[d] = _emb_positions[ind + d] - _center_of_mass[d]; } for(unsigned int d = 0; d < _emb_dimension; d++){ distance_squared += distance[d] * distance[d]; } // Check whether we can use this node as a "summary" hp_scalar_type max_width = 0.0; hp_scalar_type cur_width; for(unsigned int d = 0; d < _emb_dimension; d++) { cur_width = boundary->getWidth(d); max_width = (max_width > cur_width) ? max_width : cur_width; } if(is_leaf || (max_width / sqrt(distance_squared) < theta)) { // Compute and add t-SNE force between point and current node hp_scalar_type t_student = 1.0 / (1.0 + distance_squared); sum_Q += _weights[point_index] * cum_size * t_student; hp_scalar_type q_it_squared = t_student * t_student; for(unsigned int d = 0; d < _emb_dimension; d++){ neg_f[d] += _weights[point_index] * cum_size * q_it_squared * distance[d]; } }else{ // Recursively apply Barnes-Hut to children for(unsigned int i = 0; i < no_children; i++){ children[i]->computeNonEdgeForces(point_index, theta, neg_f, sum_Q); } } } //! 
Print out tree template <typename scalar_type> void WeightedSPTree<scalar_type>::print() { if(cum_size == 0) { printf("Empty node\n"); return; } if(is_leaf) { printf("Leaf node; _emb_positions = ["); for(int i = 0; i < size; i++) { scalar_type* point = _emb_positions + index[i] * _emb_dimension; for(int d = 0; d < _emb_dimension; d++) printf("%f, ", point[d]); printf(" (index = %d)", index[i]); if(i < size - 1) printf("\n"); else printf("]\n"); } } else { printf("Intersection node with center-of-mass = ["); for(int d = 0; d < _emb_dimension; d++) printf("%f, ", _center_of_mass[d]); printf("]; children are:\n"); for(int i = 0; i < no_children; i++) children[i]->print(); } } } } #endif
single_val_ptr.c
#include <stdlib.h> #include <omp.h> int main() { int* ptr = malloc(sizeof(int)); #pragma omp parallel { *ptr = 42; printf("Value: %d\n", *ptr); } free(ptr); }
3.c
/* Написать программу, в которой объявить и присвоить начальные значения целочисленным переменным: a и b, определить две параллельные области, первая – с числом нитей 2, вторая – с числом нитей 4. Для первой области переменные a и b объявить защищенными, с режимом доступа private и firstprivate соответственно, внутри области определить номер нити и выполнить увеличение значения переменных на это величину. Выдать на экран значения переменных до входа в параллельную область, внутри параллельной области и после выхода из параллельной области. Для второй области переменную a объявить общей, переменную b объявить защищенной, с режимом доступа private, определить номер нити и выполнить уменьшение значения переменных на это величину. Выдать на экран значения переменных до входа в параллельную область, внутри параллельной области и после выхода из параллельной области. */ #include <stdio.h> #include <omp.h> int main(int argc, char *argv[]) { int a = 0; int b = 0; printf("Значения до:\n a = %d, b = %d\n", a, b); #pragma omp parallel num_threads(2) private(a) firstprivate(b) { a = 0; a += omp_get_thread_num(); b += omp_get_thread_num(); printf("\tЗначения внутри:\n\t a = %d, b = %d\n", a, b); } printf("Значения после:\n a = %d, b = %d\n", a, b); printf("Значения до:\n a = %d, b = %d\n", a, b); #pragma omp parallel num_threads(2) shared(a) private(b) { b = 0; printf("\tНомер нити: %d\n", omp_get_thread_num()); a -= omp_get_thread_num(); b -= omp_get_thread_num(); printf("\tЗначения внутри:\n\t a = %d, b = %d\n", a, b); } printf("Значения после:\n a = %d, b = %d\n", a, b); }
grid_ao_drv.c
/* * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <string.h> #include <math.h> #include "config.h" #include "cint.h" #include "vhf/fblas.h" // 2 slots of int param[] #define POS_E1 0 #define TENSOR 1 // 128s42p21d12f8g6h4i3j #define NCTR_CART 128 // 72s24p14d10f8g6h5i4j #define NCTR_SPH 72 #define NPRIMAX 64 #define BLKSIZE 96 #define EXPCUTOFF 50 // 1e-22 #define MIN(X,Y) ((X)<(Y)?(X):(Y)) #define MAX(X,Y) ((X)>(Y)?(X):(Y)) #define NOTZERO(e) ((e)>1e-18 || (e)<-1e-18) double exp_cephes(double x); double CINTcommon_fac_sp(int l); static int _len_cart[] = { 1, 3, 6, 10, 15, 21, 28, 36 }; void GTOnabla1(double *fx1, double *fy1, double *fz1, double *fx0, double *fy0, double *fz0, int l, double a) { int i; double a2 = -2 * a; fx1[0] = a2*fx0[1]; fy1[0] = a2*fy0[1]; fz1[0] = a2*fz0[1]; for (i = 1; i <= l; i++) { fx1[i] = i*fx0[i-1] + a2*fx0[i+1]; fy1[i] = i*fy0[i-1] + a2*fy0[i+1]; fz1[i] = i*fz0[i-1] + a2*fz0[i+1]; } } /* * r - R_O = (r-R_i) + ri, ri = (x,y,z) = R_i - R_O */ void GTOx1(double *fx1, double *fy1, double *fz1, double *fx0, double *fy0, double *fz0, int l, double *ri) { int i; for (i = 0; i <= l; i++) { fx1[i] = ri[0] * fx0[i] + fx0[i+1]; fy1[i] = ri[1] * fy0[i] + fy0[i+1]; fz1[i] = ri[2] * fz0[i] + fz0[i+1]; } } int GTOprim_exp(double *eprim, double *coord, double *alpha, double *coeff, int l, int nprim, int nctr, int blksize, double fac) { int i, j; double arr, maxc; double logcoeff[nprim]; double rr[blksize]; double *gridx = coord; double *gridy = coord+blksize; double *gridz = coord+blksize*2; int not0 = 0; // the maximum value of the coefficients for each pGTO for (j = 0; j < nprim; j++) { maxc = 0; for (i = 0; i < nctr; i++) { maxc = MAX(maxc, fabs(coeff[i*nprim+j])); } logcoeff[j] = log(maxc); } for (i = 0; i < blksize; i++) { rr[i] = gridx[i]*gridx[i] + gridy[i]*gridy[i] + gridz[i]*gridz[i]; } for (j = 0; j < nprim; j++) { for (i = 0; i < blksize; i++) { arr = alpha[j] * rr[i]; if (arr-logcoeff[j] < EXPCUTOFF) { 
eprim[j*blksize+i] = exp_cephes(-arr) * fac; not0 = 1; } else { eprim[j*blksize+i] = 0; } } } return not0; } // grid2atm[atm_id,xyz,grid_id] static void _fill_grid2atm(double *grid2atm, double *coord, int blksize, int *atm, int natm, int *bas, int nbas, double *env) { int atm_id, ig; double *r_atm; for (atm_id = 0; atm_id < natm; atm_id++) { r_atm = env + atm[PTR_COORD+atm_id*ATM_SLOTS]; for (ig = 0; ig < blksize; ig++) { grid2atm[0*blksize+ig] = coord[ig*3+0] - r_atm[0]; grid2atm[1*blksize+ig] = coord[ig*3+1] - r_atm[1]; grid2atm[2*blksize+ig] = coord[ig*3+2] - r_atm[2]; } grid2atm += 3*blksize; } } static void _trans(double *ao, double *aobuf, int nao, int blksize, int counts) { int i, j, k; if (blksize == BLKSIZE) { for (k = 0; k < BLKSIZE; k+=16) { for (i = 0; i < counts; i++) { for (j = k; j < k+16; j++) { ao[j*nao+i] = aobuf[i*BLKSIZE+j]; } } } } else if ((blksize % 16) == 0) { for (k = 0; k < blksize; k+=16) { for (i = 0; i < counts; i++) { for (j = k; j < k+16; j++) { ao[j*nao+i] = aobuf[i*blksize+j]; } } } } else { for (i = 0; i < counts; i++) { for (j = 0; j < blksize; j++) { ao[j*nao+i] = aobuf[j]; } aobuf += blksize; } } } static void _set0(double *ao, int nao, int blksize, int counts) { int i, j; for (j = 0; j < blksize; j++) { for (i = 0; i < counts; i++) { ao[j*nao+i] = 0; } } } void GTOeval_sph_iter(void (*feval)(), int (*fexp)(), int param[], int nao, int ngrids, int blksize, int bastart, int bascount, double *ao, double *coord, char *non0table, int *atm, int natm, int *bas, int nbas, double *env) { const int ncomp = param[TENSOR]; const int basend = bastart + bascount; const int atmstart = bas[bastart*BAS_SLOTS+ATOM_OF]; const int atmend = bas[(basend-1)*BAS_SLOTS+ATOM_OF]+1; const int atmcount = atmend - atmstart; int i, k, l, np, nc, atm_id, bas_id, deg; int ao_id = 0; double fac; double *p_exp, *pcoeff, *pcoord, *pcart, *ri; double *paobuf; double eprim[NPRIMAX*blksize*2]; double cart_gto[NCTR_CART*blksize * ncomp]; double 
aobuf[NCTR_SPH*blksize * ncomp]; double grid2atm[atmcount*3*blksize]; // [atm_id,xyz,grid] _fill_grid2atm(grid2atm, coord, blksize, atm+atmstart*ATM_SLOTS, atmcount, bas, nbas, env); for (bas_id = bastart; bas_id < basend; bas_id++) { np = bas[bas_id*BAS_SLOTS+NPRIM_OF]; nc = bas[bas_id*BAS_SLOTS+NCTR_OF ]; l = bas[bas_id*BAS_SLOTS+ANG_OF ]; deg = l * 2 + 1; fac = CINTcommon_fac_sp(l); p_exp = env + bas[bas_id*BAS_SLOTS+PTR_EXP]; pcoeff = env + bas[bas_id*BAS_SLOTS+PTR_COEFF]; atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF]; pcoord = grid2atm + (atm_id - atmstart) * 3*blksize; if (non0table[bas_id] && (*fexp)(eprim, pcoord, p_exp, pcoeff, l, np, nc, blksize, fac)) { ri = env + atm[PTR_COORD+atm_id*ATM_SLOTS]; (*feval)(cart_gto, ri, eprim, pcoord, p_exp, pcoeff, l, np, nc, blksize); for (i = 0; i < ncomp; i++) { pcart = cart_gto + i*nc*_len_cart[l]*blksize; if (l < 2) { // s, p functions _trans(ao+i*nao*ngrids+ao_id, pcart, nao, blksize, nc*deg); } else { paobuf = aobuf; for (k = 0; k < nc; k++) { CINTc2s_ket_sph(paobuf, blksize, pcart, l); pcart += _len_cart[l] * blksize; paobuf += deg * blksize; } _trans(ao+i*nao*ngrids+ao_id, aobuf, nao, blksize, nc*deg); } } } else { for (i = 0; i < ncomp; i++) { _set0(ao+i*nao*ngrids+ao_id, nao, blksize, nc*deg); } } ao_id += deg * nc; } } void GTOeval_cart_iter(void (*feval)(), int (*fexp)(), int param[], int nao, int ngrids, int blksize, int bastart, int bascount, double *ao, double *coord, char *non0table, int *atm, int natm, int *bas, int nbas, double *env) { const int ncomp = param[TENSOR]; const int basend = bastart + bascount; const int atmstart = bas[bastart*BAS_SLOTS+ATOM_OF]; const int atmend = bas[(basend-1)*BAS_SLOTS+ATOM_OF]+1; const int atmcount = atmend - atmstart; int i, l, np, nc, atm_id, bas_id, deg; int ao_id = 0; double fac; double *p_exp, *pcoeff, *pcoord, *pcart, *ri; double eprim[NPRIMAX*blksize*2]; double cart_gto[NCTR_CART*blksize * ncomp]; double grid2atm[atmcount*3*blksize]; // [atm_id,xyz,grid] 
_fill_grid2atm(grid2atm, coord, blksize, atm+atmstart*ATM_SLOTS, atmcount, bas, nbas, env); for (bas_id = bastart; bas_id < basend; bas_id++) { np = bas[bas_id*BAS_SLOTS+NPRIM_OF]; nc = bas[bas_id*BAS_SLOTS+NCTR_OF ]; l = bas[bas_id*BAS_SLOTS+ANG_OF ]; deg = _len_cart[l]; fac = CINTcommon_fac_sp(l); p_exp = env + bas[bas_id*BAS_SLOTS+PTR_EXP]; pcoeff = env + bas[bas_id*BAS_SLOTS+PTR_COEFF]; atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF]; pcoord = grid2atm + (atm_id - atmstart) * 3*blksize; if (non0table[bas_id] && (*fexp)(eprim, pcoord, p_exp, pcoeff, l, np, nc, blksize, fac)) { ri = env + atm[PTR_COORD+atm_id*ATM_SLOTS]; (*feval)(cart_gto, ri, eprim, pcoord, p_exp, pcoeff, l, np, nc, blksize); for (i = 0; i < ncomp; i++) { pcart = cart_gto + i*nc*_len_cart[l]*blksize; _trans(ao+i*nao*ngrids+ao_id, pcart, nao, blksize, nc*deg); } } else { for (i = 0; i < ncomp; i++) { _set0(ao+i*nao*ngrids+ao_id, nao, blksize, nc*deg); } } ao_id += deg * nc; } } /* * blksize <= 1024 to avoid stack overflow * * non0table[ngrids/blksize,natm] is the T/F table for ao values to * screen the ao evaluation for each shell */ void GTOeval_sph_drv(void (*feval)(), int (*fexp)(), int param[], int nao, int ngrids, int blksize, int bastart, int bascount, double *ao, double *coord, char *non0table, int *atm, int natm, int *bas, int nbas, double *env) { ao += CINTtot_cgto_spheric(bas, bastart); const int nblk = (ngrids+blksize-1) / blksize; int ip, ib; #pragma omp parallel default(none) \ shared(feval, fexp, param, nao, ngrids, bastart, bascount, blksize, \ ao, coord, non0table, atm, natm, bas, nbas, env) \ private(ip, ib) { #pragma omp for nowait schedule(dynamic, 1) for (ib = 0; ib < nblk; ib++) { ip = ib * blksize; GTOeval_sph_iter(feval, fexp, param, nao, ngrids, MIN(ngrids-ip, blksize), bastart, bascount, ao+ip*nao, coord+ip*3, non0table+ib*nbas, atm, natm, bas, nbas, env); } } } void GTOeval_cart_drv(void (*feval)(), int (*fexp)(), int param[], int nao, int ngrids, int blksize, int bastart, int 
bascount, double *ao, double *coord, char *non0table, int *atm, int natm, int *bas, int nbas, double *env) { ao += CINTtot_cgto_cart(bas, bastart); const int nblk = (ngrids+blksize-1) / blksize; int ip, ib; #pragma omp parallel default(none) \ shared(feval, fexp, param, nao, ngrids, bastart, bascount, blksize, \ ao, coord, non0table, atm, natm, bas, nbas, env) \ private(ip, ib) { #pragma omp for nowait schedule(dynamic, 1) for (ib = 0; ib < nblk; ib++) { ip = ib * blksize; GTOeval_cart_iter(feval, fexp, param, nao, ngrids, MIN(ngrids-ip, blksize), bastart, bascount, ao+ip*nao, coord+ip*3, non0table+ib*nbas, atm, natm, bas, nbas, env); } } }
default-1.c
void foo (void) { int x = 0, i; #pragma omp task default(none) /* { dg-message "note: enclosing 'task'" } */ { x++; /* { dg-error "'x' not specified in enclosing 'task'" } */ } #pragma omp taskloop default(none) /* { dg-message "note: enclosing 'taskloop'" } */ for (i = 0; i < 64; i++) { x++; /* { dg-error "'x' not specified in enclosing 'taskloop'" } */ } #pragma omp teams default(none) /* { dg-message "note: enclosing 'teams'" } */ { x++; /* { dg-error "'x' not specified in enclosing 'teams'" } */ } #pragma omp parallel default(none) /* { dg-message "note: enclosing 'parallel'" } */ { x++; /* { dg-error "'x' not specified in enclosing 'parallel'" } */ } }
omp_reduce.c
/* Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 */

#include <stdlib.h>
#include <complex.h>
#include "config.h"

#define MIN(x, y)       ((x) < (y) ? (x) : (y))

/* Earlier tree-reduction variant, kept for reference:
//void NPomp_dsum_reduce_inplace(double **vec, size_t count)
//{
//        unsigned int nthreads = omp_get_num_threads();
//        unsigned int thread_id = omp_get_thread_num();
//        unsigned int bit, thread_src;
//        unsigned int mask = 0;
//        double *dst = vec[thread_id];
//        double *src;
//        size_t i;
//#pragma omp barrier
//        for (bit = 0; (1<<bit) < nthreads; bit++) {
//                mask |= 1 << bit;
//                if (!(thread_id & mask)) {
//                        thread_src = thread_id | (1<<bit);
//                        if (thread_src < nthreads) {
//                                src = vec[thread_src];
//                                for (i = 0; i < count; i++) {
//                                        dst[i] += src[i];
//                                }
//                        }
//                }
//#pragma omp barrier
//        }
//
//}
*/

/*
 * Element-wise sum-reduce vec[1..nthreads-1] into vec[0].
 *
 * Must be called by EVERY thread of an active OpenMP parallel region
 * (omp_get_num_threads/omp_get_thread_num are read inside the region).
 * Each thread owns the slice [start, end) of the output, so threads never
 * write the same element; the barriers fence the reads of all vec[it]
 * before and after the reduction.
 */
void NPomp_dsum_reduce_inplace(double **vec, size_t count)
{
        unsigned int nthreads = omp_get_num_threads();
        unsigned int thread_id = omp_get_thread_num();
        /* ceil(count / nthreads): per-thread slice length */
        size_t blksize = (count + nthreads - 1) / nthreads;
        size_t start = thread_id * blksize;
        size_t end = MIN(start + blksize, count);
        double *dst = vec[0];
        double *src;
        size_t it, i;
#pragma omp barrier
        for (it = 1; it < nthreads; it++) {
                src = vec[it];
                for (i = start; i < end; i++) {
                        dst[i] += src[i];
                }
        }
#pragma omp barrier
}

/* Element-wise product-reduce vec[1..nthreads-1] into vec[0].
 * Same calling contract as NPomp_dsum_reduce_inplace. */
void NPomp_dprod_reduce_inplace(double **vec, size_t count)
{
        unsigned int nthreads = omp_get_num_threads();
        unsigned int thread_id = omp_get_thread_num();
        size_t blksize = (count + nthreads - 1) / nthreads;
        size_t start = thread_id * blksize;
        size_t end = MIN(start + blksize, count);
        double *dst = vec[0];
        double *src;
        size_t it, i;
#pragma omp barrier
        for (it = 1; it < nthreads; it++) {
                src = vec[it];
                for (i = start; i < end; i++) {
                        dst[i] *= src[i];
                }
        }
#pragma omp barrier
}

/* Complex counterpart of NPomp_dsum_reduce_inplace. */
void NPomp_zsum_reduce_inplace(double complex **vec, size_t count)
{
        unsigned int nthreads = omp_get_num_threads();
        unsigned int thread_id = omp_get_thread_num();
        size_t blksize = (count + nthreads - 1) / nthreads;
        size_t start = thread_id * blksize;
        size_t end = MIN(start + blksize, count);
        double complex *dst = vec[0];
        double complex *src;
        size_t it, i;
#pragma omp barrier
        for (it = 1; it < nthreads; it++) {
                src = vec[it];
                for (i = start; i < end; i++) {
                        dst[i] += src[i];
                }
        }
#pragma omp barrier
}

/* Complex counterpart of NPomp_dprod_reduce_inplace. */
void NPomp_zprod_reduce_inplace(double complex **vec, size_t count)
{
        unsigned int nthreads = omp_get_num_threads();
        unsigned int thread_id = omp_get_thread_num();
        size_t blksize = (count + nthreads - 1) / nthreads;
        size_t start = thread_id * blksize;
        size_t end = MIN(start + blksize, count);
        double complex *dst = vec[0];
        double complex *src;
        size_t it, i;
#pragma omp barrier
        for (it = 1; it < nthreads; it++) {
                src = vec[it];
                for (i = start; i < end; i++) {
                        dst[i] *= src[i];
                }
        }
#pragma omp barrier
}

#ifdef _OPENMP
/* Thin wrappers so Python-side callers need not link against OpenMP. */
int get_omp_threads() {
        return omp_get_max_threads();
}
int set_omp_threads(int n) {
        omp_set_num_threads(n);
        return n;
}
#else
// mimic omp_get_max_threads omp_set_num_threads function of libgomp
int get_omp_threads() { return 1; }
int set_omp_threads(int n) { return 0; }
#endif
ConjugateGradient.h
/*
 * ConjugateGradient.h
 *
 *  Created on: 15.06.2014
 *      Author: Daniel Hoske and Michael Wegner
 */

#ifndef CONJUGATE_GRADIENT_H_
#define CONJUGATE_GRADIENT_H_

#include <cstdint>
#include <utility>

#include "LinearSolver.h"
#include "../algebraic/Vector.h"
#include "../algebraic/CSRMatrix.h"

namespace NetworKit {

/**
 * @ingroup numerics
 * Implementation of Conjugate Gradient.
 *
 * Preconditioned CG solver; the Preconditioner type must be constructible
 * from a Matrix and provide rhs(Vector) (apply C^{-1} to a vector).
 */
template<class Matrix, class Preconditioner>
class ConjugateGradient : public LinearSolver<Matrix> {
public:
    ConjugateGradient(double tolerance = 1e-5) : LinearSolver<Matrix>(tolerance), matrix(Matrix()) {}

    /** Stores @a matrix and builds the preconditioner for it. */
    void setup(const Matrix& matrix) {
        this->matrix = matrix;
        precond = Preconditioner(matrix);
    }

    /* Identical to setup(): no connected-component-specific handling here. */
    void setupConnected(const Matrix& matrix) {
        this->matrix = matrix;
        precond = Preconditioner(matrix);
    }

    /**
     * Solves the linear system \f$Ax = b\f$ using the conjugate gradient method
     * with a given preconditioner and with initial value \f$(0, \dots, 0)^T\f$.
     * We the return the solution \f$x\f$. The solution \f$x\f$ fulfils
     * \f$\frac{\Vert Ax - b\Vert}{\Vert b \Vert} \leq relative\_residual\f$ if the
     * algorithm has converged.
     *
     * Obviously, @a A needs to have the same number of rows as @a b and
     * @a status.residual must be nonnegative. You may also request that the algorithm
     * does not run for more than @a status.max_iters iterations.
     */
    SolverStatus solve(const Vector& rhs, Vector& result,
                       count maxConvergenceTime = 5 * 60 * 1000,
                       count maxIterations = std::numeric_limits<count>::max());

    /**
     * Solves the linear systems in parallel.
     * @param rhs
     * @param results
     * @param maxConvergenceTime
     * @param maxIterations
     */
    void parallelSolve(const std::vector<Vector>& rhs, std::vector<Vector>& results,
                       count maxConvergenceTime = 5 * 60 * 1000,
                       count maxIterations = std::numeric_limits<count>::max());

private:
    Matrix matrix;          // system matrix A, copied in setup()
    Preconditioner precond; // preconditioner built from A
};

template<class Matrix, class Preconditioner>
SolverStatus ConjugateGradient<Matrix, Preconditioner>::solve(const Vector& rhs, Vector& result,
                                                              count maxConvergenceTime,
                                                              count maxIterations) {
    assert(matrix.numberOfRows() == rhs.getDimension());
    // NOTE(review): maxConvergenceTime is accepted but never read in this
    // body; only maxIterations bounds the loop — confirm intended.

    // Absolute residual to achieve: (tol * ||b||)^2, compared against ||r||^2
    double sqr_desired_residual = this->tolerance * this->tolerance * (rhs.length() * rhs.length());

    // Main loop. See: http://en.wikipedia.org/wiki/Conjugate_gradient_method#The_resulting_algorithm
    Vector residual_dir = rhs - matrix*result;
    Vector conjugate_dir = precond.rhs(residual_dir);
    double sqr_residual = Vector::innerProduct(residual_dir, residual_dir);
    double sqr_residual_precond = Vector::innerProduct(residual_dir, conjugate_dir);

    count niters = 0;
    Vector tmp, residual_precond;
    while (sqr_residual > sqr_desired_residual) {
        niters++;
        if (niters > maxIterations) {
            break;
        }
        tmp = matrix * conjugate_dir;
        // step length alpha = <r, Cr> / <p, Ap>
        double step = sqr_residual_precond / Vector::innerProduct(conjugate_dir, tmp);
        result += step * conjugate_dir;
        residual_dir -= step * tmp;
        sqr_residual = Vector::innerProduct(residual_dir, residual_dir);

        residual_precond = precond.rhs(residual_dir);
        double new_sqr_residual_precond = Vector::innerProduct(residual_dir, residual_precond);
        // beta = <r_new, Cr_new> / <r_old, Cr_old>; update search direction
        conjugate_dir = (new_sqr_residual_precond / sqr_residual_precond) * conjugate_dir
                        + residual_precond;
        sqr_residual_precond = new_sqr_residual_precond;
    }

    SolverStatus status;
    status.numIters = niters;
    // converged is judged on the true (recomputed) residual, not the recurrence
    status.residual = (rhs - matrix*result).length();
    status.converged = status.residual / rhs.length() <= this->tolerance;

    return status;
}

template<class Matrix, class Preconditioner>
void ConjugateGradient<Matrix, Preconditioner>::parallelSolve(const std::vector<Vector>& rhs,
                                                              std::vector<Vector>& results,
                                                              count maxConvergenceTime,
                                                              count maxIterations) {
    // Independent systems: one OpenMP task per right-hand side.
#pragma omp parallel for
    for (omp_index i = 0; i < static_cast<omp_index>(rhs.size()); ++i) {
        this->solve(rhs[i], results[i], maxConvergenceTime, maxIterations);
    }
}

} /* namespace NetworKit */

#endif /* CONJUGATE_GRADIENT_H_ */
3.race2.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 20 int main() { int A[N][N][N]; #pragma omp parallel for for (int i = 1; i < N; i++) for (int j = 1; j < N; j++) for (int k = 1; k < N; k++) A[i][j][k] = A[i - 1][j][k]; } // CHECK: Data Race detected // END
FasterGossipCommMulti.h
/* Copyright 2020 NVIDIA Corporation. All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software.  Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 */

#pragma once

#include <hwloc.h>
#include <hwloc/cudart.h>
#include <omp.h>
#include <ucp/api/ucp.h>

#include <algorithm>

#include "FasterGossipCommMultiTraits.h"
#include "mpi.h"

// Number of warm-up exec() rounds run at the end of Initialize()
#define WARM_UP_ROUND 2

namespace HugeCTR {
namespace GossipComm {

// The empty call back function for UCP communication API
inline void empty_send_callback_func(void *request, ucs_status_t status) {}
inline void empty_recv_callback_func(void *request, ucs_status_t status,
                                     ucp_tag_recv_info_t *info) {}

/*
 * Multi-node all-to-all built from per-node gossip all2all stages plus
 * inter-node UCX tag-matched point-to-point transfers, pipelined stage by
 * stage: while thread 0 runs the local all2all of stage s, thread 1 exchanges
 * the buffers needed for stage s+1.  One UCP context/worker is created per
 * CPU socket and each GPU's traffic is routed through its affine socket.
 */
template <typename data_t_>
class FasterGossipCommMulti : public FasterComm {
 public:
  using GossipMultiCommTraits = FasterGossipCommMultiAll2AllTraits<data_t_>;
  using FasterGossipComm = typename GossipMultiCommTraits::FasterGossipComm;
  using gpu_id_t = typename GossipMultiCommTraits::gpu_id_t;

  // Ctor
  FasterGossipCommMulti(const std::string &plan_file, const std::vector<gpu_id_t> &GPU_list,
                        const int num_proc, const int rank, MPI_Comm comm)
      : GPU_list_(GPU_list),
        rank_(rank),
        num_proc_(num_proc),
        comm_(comm),
        GossipCommHandle_(num_proc_),
        local_buffer_(GPU_list_.size()),
        recv_buffer_(GPU_list_.size()),
        temp_buf_(GPU_list_.size()),
        temp_table_(GPU_list_.size(), std::vector<size_t>(GPU_list_.size())),
        temp_src_(GPU_list_.size()),
        temp_dst_(GPU_list_.size()),
        affinity_list_(num_proc_),
        send_reqs_(GPU_list_.size(), nullptr),
        recv_reqs_(GPU_list_.size(), nullptr) {
    // Do some check
    assert((num_proc_ > 0) && "The number of process is not greater than 0!\n");
    assert((rank_ >= 0) && (rank_ < num_proc_) && "The rank of this process is not valid!\n");

    // Local and total GPU count
    num_local_gpu_ = GPU_list_.size();
    num_total_gpu_ = num_proc_ * num_local_gpu_;
    assert((num_local_gpu_ > 0) && "The number of local GPUs is not valid!\n");

    // Create MPI_Request buffer
    // request_ = (MPI_Request* )malloc(2 * num_local_gpu_ * sizeof(MPI_Request));

    // Construct the local gossip all2all library; one handle per pipeline stage
    for (int stage = 0; stage < num_proc_; stage++) {
      GossipCommHandle_[stage] = new FasterGossipComm(plan_file, GPU_list_);
    }

    // HWLOC variable setup
    hwloc_topology_init(&topo_);
    hwloc_topology_set_io_types_filter(topo_, HWLOC_TYPE_FILTER_KEEP_ALL);
    hwloc_topology_load(topo_);
    hwloc_cpuset_t ori_cpu_set;
    hwloc_cpuset_t cpu_set;
    ori_cpu_set = hwloc_bitmap_alloc();
    cpu_set = hwloc_bitmap_alloc();

    // Get the original thread binding for recovery
    hwloc_get_cpubind(topo_, ori_cpu_set, HWLOC_CPUBIND_THREAD);

    // Get the number of CPU sockets and resize the UCP vector
    socket_num_ = hwloc_get_nbobjs_by_type(topo_, HWLOC_OBJ_PACKAGE);
    assert((socket_num_ > 0) && "The number of CPU sockets is not valid!\n");

    // Temp variable used to initialize UCP environment
    ucp_params_t ucp_params;
    ucp_config_t *ucp_config;
    ucp_worker_params_t ucp_worker_params;
    size_t ucp_worker_address_len;
    std::vector<ucp_ep_params_t> ucp_ep_params(socket_num_ * num_proc_);
    ucp_context_.resize(socket_num_);
    ucp_worker_.resize(socket_num_);
    ucp_worker_address_.resize(socket_num_);
    ucp_worker_address_book_.resize(socket_num_ * num_proc_);
    ucp_endpoints_.resize(socket_num_, std::vector<ucp_ep_h>(socket_num_ * num_proc_));

    // Initialize UCP Env on different CPU sockets.  The thread is bound to
    // each socket in turn so the UCX resources are allocated socket-locally.
    for (int i = 0; i < socket_num_; i++) {
      // Bind the current thread to run on target CPU socket
      hwloc_obj_t current_socket = hwloc_get_obj_by_type(topo_, HWLOC_OBJ_PACKAGE, i);
      hwloc_set_cpubind(topo_, current_socket->cpuset, HWLOC_CPUBIND_THREAD);

      // Test the place where the current thread is running
      hwloc_get_last_cpu_location(topo_, cpu_set, HWLOC_CPUBIND_THREAD);
      char *cpu_string;
      hwloc_bitmap_asprintf(&cpu_string, cpu_set);
      printf("On rank %d, the cpu set that current thread is running on is : %s.\n", rank_,
             cpu_string);
      free(cpu_string);

      // Initialize UCP context
      memset(&ucp_params, 0, sizeof(ucp_params));
      ucp_params.field_mask = UCP_PARAM_FIELD_FEATURES | UCP_PARAM_FIELD_ESTIMATED_NUM_EPS;
      ucp_params.features = UCP_FEATURE_TAG;
      ucp_params.estimated_num_eps = socket_num_ * num_proc_;
      ucp_config_read(NULL, NULL, &ucp_config);
      ucp_init(&ucp_params, ucp_config, &ucp_context_[i]);
      ucp_config_release(ucp_config);

      // Initialize UCP worker
      memset(&ucp_worker_params, 0, sizeof(ucp_worker_params));
      ucp_worker_params.field_mask = UCP_WORKER_PARAM_FIELD_THREAD_MODE;
      ucp_worker_params.thread_mode = UCS_THREAD_MODE_SINGLE;  // only single thread can access this
                                                               // worker at one time, i.e. no thread
                                                               // safety.
      ucp_worker_create(ucp_context_[i], &ucp_worker_params, &ucp_worker_[i]);

      // Get address for local worker
      ucp_worker_get_address(ucp_worker_[i], &ucp_worker_address_[i], &ucp_worker_address_len);
    }

    // Recover the CPU binding of current thread
    hwloc_set_cpubind(topo_, ori_cpu_set, HWLOC_CPUBIND_THREAD);

    // Create EPs for local worker
    // Allocate address for all(local and remote) workers
    // NOTE(review): assumes every worker address has the same length as the
    // last one queried — confirm against the UCX deployment.
    for (auto &iaddress : ucp_worker_address_book_) {
      iaddress = (ucp_address_t *)malloc(ucp_worker_address_len);
    }

    // Copy local worker address to address table
    for (int i = 0; i < socket_num_; i++) {
      memcpy(ucp_worker_address_book_[rank_ * socket_num_ + i], ucp_worker_address_[i],
             ucp_worker_address_len);
    }

    // Using MPI to broadcast address from all ranks to all ranks(all broadcast)
    for (int iroot = 0; iroot < num_proc_; iroot++) {
      for (int i = 0; i < socket_num_; i++) {
        MPI_Bcast(ucp_worker_address_book_[iroot * socket_num_ + i], ucp_worker_address_len,
                  MPI_BYTE, iroot, comm_);
      }
    }

    // Create EPs on local worker to other workers(include itself)
    for (int socket = 0; socket < socket_num_; socket++) {
      for (int i = 0; i < socket_num_ * num_proc_; i++) {
        // Only need to set once
        if (socket == 0) {
          memset(&ucp_ep_params[i], 0, sizeof(ucp_ep_params[i]));
          ucp_ep_params[i].field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS;
          ucp_ep_params[i].address = ucp_worker_address_book_[i];
        }
        ucp_ep_create(ucp_worker_[socket], &ucp_ep_params[i], &ucp_endpoints_[socket][i]);
      }
    }

    // Allocate affinity list for all GPUs on all nodes
    for (int i = 0; i < num_proc_; i++) {
      affinity_list_[i] = (gpu_id_t *)malloc(num_local_gpu_ * sizeof(*affinity_list_[i]));
    }

    // Assign each local T-GPU to the local L-socket
    for (int i = 0; i < num_local_gpu_; i++) {
      // Find the affinity CPU set that current topo GPU is binding to
      hwloc_cudart_get_device_cpuset(topo_, GPU_list_[i], cpu_set);
      hwloc_obj_t affinity_socket =
          hwloc_get_next_obj_covering_cpuset_by_type(topo_, cpu_set, HWLOC_OBJ_PACKAGE, NULL);
      affinity_list_[rank_][i] = (gpu_id_t)(affinity_socket->logical_index);
    }

    // Using MPI to broadcast GPU locality info to all other ranks
    for (int iroot = 0; iroot < num_proc_; iroot++) {
      MPI_Bcast(affinity_list_[iroot], num_local_gpu_ * sizeof(*affinity_list_[iroot]), MPI_BYTE,
                iroot, comm_);
    }

    hwloc_bitmap_free(ori_cpu_set);
    hwloc_bitmap_free(cpu_set);
  }

  // Dtor: tears down in reverse order — gossip handles, EPs (flushed),
  // worker addresses, workers, contexts, address book, topology, affinity.
  ~FasterGossipCommMulti() {
    // free(request_);
    for (int stage = 0; stage < num_proc_; stage++) {
      delete GossipCommHandle_[stage];
    }

    // Release UCP EPs
    for (int socket = 0; socket < socket_num_; socket++) {
      for (int irank = 0; irank < socket_num_ * num_proc_; irank++) {
        // Flush all operations associated with the EP and release the EP
        ucs_status_ptr_t ucs_status_ptr =
            ucp_ep_close_nb(ucp_endpoints_[socket][irank], UCP_EP_CLOSE_MODE_FLUSH);
        if (UCS_PTR_IS_ERR(ucs_status_ptr) || UCS_PTR_STATUS(ucs_status_ptr) == UCS_OK) {
          continue;
        }
        // While the releasing is not finished, progress the worker
        while (ucp_request_check_status(ucs_status_ptr) == UCS_INPROGRESS) {
          for (int j = 0; j < socket_num_; j++) {
            ucp_worker_progress(ucp_worker_[j]);
          }
        }
        // Free the request
        ucp_request_free(ucs_status_ptr);
      }
    }

    // Wait for all ranks to release EPs before releasing any worker
    MPI_Barrier(comm_);

    // Release worker address
    for (int i = 0; i < socket_num_; i++) {
      ucp_worker_release_address(ucp_worker_[i], ucp_worker_address_[i]);
    }

    // Release worker
    for (int i = 0; i < socket_num_; i++) {
      ucp_worker_destroy(ucp_worker_[i]);
    }

    // Release UCP context
    for (int i = 0; i < socket_num_; i++) {
      ucp_cleanup(ucp_context_[i]);
    }

    // Free address book
    for (auto &iaddress : ucp_worker_address_book_) {
      free(iaddress);
    }

    // Free HWLOC topology
    hwloc_topology_destroy(topo_);

    // Free GPU affinity list
    for (int i = 0; i < num_proc_; i++) {
      free(affinity_list_[i]);
    }
  }

  // Initialize a communication: record user buffers/tables, allocate the
  // ping-pong and gossip temp buffers, set up every stage's local all2all,
  // then warm up the UCX path by running exec() WARM_UP_ROUND times.
  void Initialize(const std::vector<data_t_ *> &src, const std::vector<data_t_ *> &dst,
                  const std::vector<std::vector<size_t>> &send_table,
                  const std::vector<std::vector<size_t>> &recv_table) {
    // Device restorer
    CudaDeviceContext context;

    // record user provide data
    src_ = src;
    dst_ = dst;
    send_table_ = send_table;
    recv_table_ = recv_table;

    // Calculate the size of Local buffers and Recv buffers, and allocate on each local GPU.
    // Sized for the largest per-remote-node chunk this GPU will ever receive.
    for (int i = 0; i < num_local_gpu_; i++) {
      size_t max_size = 0;
      for (int j = 0; j < num_proc_; j++) {
        if (j != rank_) {
          size_t accum_size = 0;
          for (int k = 0; k < num_local_gpu_; k++) {
            accum_size += recv_table_[k][i + j * num_local_gpu_];
          }
          max_size = std::max(max_size, accum_size);
        }
      }

      // Allocate buffers on current topo GPU
      context.set_device(GPU_list_[i]);
      CK_CUDA_THROW_(cudaMalloc(&local_buffer_[i], sizeof(data_t_) * max_size));
      CK_CUDA_THROW_(cudaMalloc(&recv_buffer_[i], sizeof(data_t_) * max_size));
    }

    // Max buffer size required by gossip all2all on each GPU
    std::vector<size_t> max_temp_buf_size(num_local_gpu_, 0);

    // Initialize all gossip all2all object
    for (int stage = 0; stage < num_proc_; stage++) {
      // for first stage, do all2all on local data
      if (stage == 0) {
        // Extract the temp table for local all2all on this stage
        for (int i = 0; i < num_local_gpu_; i++) {
          for (int j = 0; j < num_local_gpu_; j++) {
            temp_table_[i][j] = recv_table_[j][rank_ * num_local_gpu_ + i];
          }
        }

        // Extract the temp src and dst buffers for local all2all on this stage
        for (int i = 0; i < num_local_gpu_; i++) {
          size_t src_offset = 0;
          size_t dst_offset = 0;
          for (int j = 0; j < num_local_gpu_ * rank_; j++) {
            src_offset += send_table_[i][j];
            dst_offset += recv_table_[i][j];
          }
          temp_src_[i] = src_[i] + src_offset;
          temp_dst_[i] = dst_[i] + dst_offset;
        }

        // Initialize the local all2all
        std::vector<size_t> temp_buf_size =
            GossipCommHandle_[stage]->Initialize_no_malloc(temp_src_, temp_dst_, temp_table_);

        // Find the largest buffer size needed on each GPU
        for (int i = 0; i < num_local_gpu_; i++) {
          max_temp_buf_size[i] = std::max(temp_buf_size[i], max_temp_buf_size[i]);
        }
      }
      // for later stage, do all2all with data received from previous stage
      else {
        // previous stage src node
        int prev_src_node = (rank_ + num_proc_ - stage) % num_proc_;

        // Extract the temp table for local all2all on this stage
        for (int i = 0; i < num_local_gpu_; i++) {
          for (int j = 0; j < num_local_gpu_; j++) {
            temp_table_[i][j] = recv_table_[j][prev_src_node * num_local_gpu_ + i];
          }
        }

        // Extract the temp dst buffers for local all2all on this stage
        for (int i = 0; i < num_local_gpu_; i++) {
          size_t dst_offset = 0;
          for (int j = 0; j < num_local_gpu_ * prev_src_node; j++) {
            dst_offset += recv_table_[i][j];
          }
          temp_dst_[i] = dst_[i] + dst_offset;
        }

        std::vector<size_t> temp_buf_size;
        // Initialize the local all2all; source alternates with the
        // recv/local ping-pong buffers swapped at the end of each stage
        if (stage % 2 == 0) {
          temp_buf_size =
              GossipCommHandle_[stage]->Initialize_no_malloc(local_buffer_, temp_dst_, temp_table_);
        } else {
          temp_buf_size =
              GossipCommHandle_[stage]->Initialize_no_malloc(recv_buffer_, temp_dst_, temp_table_);
        }

        // Find the largest buffer size needed on each GPU
        for (int i = 0; i < num_local_gpu_; i++) {
          max_temp_buf_size[i] = std::max(temp_buf_size[i], max_temp_buf_size[i]);
        }
      }
    }

    // Allocate max size temp buffers shared by all gossip all2all
    for (int i = 0; i < num_local_gpu_; i++) {
      // Allocate temp buffers on each GPU
      context.set_device(GPU_list_[i]);
      CK_CUDA_THROW_(cudaMalloc(&temp_buf_[i], sizeof(data_t_) * max_temp_buf_size[i]));
    }

    // Set the allocated temp buffers to all gossip all2all
    for (int stage = 0; stage < num_proc_; stage++) {
      GossipCommHandle_[stage]->set_buf(temp_buf_);
    }

    // Run exec() in advance to warm up all buffers used by UCX
    // For even nodes, 1 run is enough for warm up, for odd nodes, 2 runs is needed
    for (int i = 0; i < WARM_UP_ROUND; i++) {
      exec();
    }
  }

  // Run the staged multi-node all2all set up by Initialize().
  void exec() {
    // loop through all stages
    for (int stage = 0; stage < num_proc_; stage++) {
      // We cuse 2 threads, one for UCX P2P, one for gossip all2all. In the same stage, these 2
      // operations can be executed concurrently
#pragma omp parallel default(none)                                                          \
    shared(stage, num_proc_, rank_, num_local_gpu_, send_table_, affinity_list_, send_reqs_, \
           ucp_endpoints_, socket_num_, src_, recv_table_, recv_reqs_, ucp_worker_,          \
           recv_buffer_, GossipCommHandle_) num_threads(2)
      {
        // Each thread grab its ID within this OpenMP thread team
        int thread_id = omp_get_thread_num();

        // Thread 0 do the gossip all2all
        if (thread_id == 0) {
          // do local all2all
          // Execute the local all2all
          GossipCommHandle_[stage]->exec();
        }
        // Thread 1 do the UCX P2P
        else {
          // for all stage except last stage, send and receive data to/from other nodes
          if (stage < num_proc_ - 1) {
            // The dst and src rank of local node in this stage
            int dst_rank = (rank_ + stage + 1) % num_proc_;
            int src_rank = (rank_ + num_proc_ - stage - 1) % num_proc_;

            // loop through all local GPUs to send GPU buffers to dst worker
            for (int i = 0; i < num_local_gpu_; i++) {
              size_t src_offset = 0;
              size_t src_len = 0;
              // Accumulate the offset within the src_buffer
              for (int j = 0; j < num_local_gpu_ * dst_rank; j++) {
                src_offset += send_table_[i][j];
              }
              // Accumulate the amount of elements to send to the target node
              for (int j = 0; j < num_local_gpu_; j++) {
                src_len += send_table_[i][j + num_local_gpu_ * dst_rank];
              }

              // MPI_Isend(src_[i] + src_offset, sizeof(data_t_) * src_len, MPI_BYTE, dst_rank, i,
              // comm_, request_ + i);

              // Prepare the tag for tag-matching massage passing, the tag should identify the user
              // tag, source worker of the tag and other info
              ucp_tag_t comm_tag = 0LLU;
              // MSB 32-bit for original MPI TAG
              comm_tag |= ((ucp_tag_t)i << 32);
              // 16-32 bits are source rank
              comm_tag |= ((ucp_tag_t)(rank_ & 0x0000FFFF) << 16);
              // The 0-15 bits are source L-socket(worker)
              comm_tag |= (((ucp_tag_t)(affinity_list_[rank_][i])) & 0x000000000000FFFF);

              send_reqs_[i] = ucp_tag_send_nb(
                  ucp_endpoints_[affinity_list_[rank_][i]]
                                [dst_rank * socket_num_ + affinity_list_[dst_rank][i]],
                  src_[i] + src_offset, sizeof(data_t_) * src_len,
                  ucp_dt_make_contig(sizeof(char)), comm_tag, empty_send_callback_func);

              // If the returned request is not a valid pointer, that means that the operation
              // already finished(failed or completed), the callback will not been
              // called in these situation and the returned request is not de-referencable thus no
              // release needed.
              if (UCS_PTR_IS_ERR(send_reqs_[i]) || UCS_PTR_STATUS(send_reqs_[i]) == UCS_OK) {
                send_reqs_[i] = nullptr;
              }
            }

            // loop through all local GPUs to receive GPU buffers from src worker
            for (int i = 0; i < num_local_gpu_; i++) {
              size_t dst_len = 0;
              // Accumulate the amount of elements to receive from the source node
              for (int j = 0; j < num_local_gpu_; j++) {
                dst_len += recv_table_[j][i + src_rank * num_local_gpu_];
              }

              // MPI_Irecv(recv_buffer_[i], sizeof(data_t_) * dst_len, MPI_BYTE, src_rank, i, comm_,
              // request_ + num_local_gpu_ +i);

              // Prepare the tag for tag-matching massage passing, the tag should identify the user
              // tag, source worker of the tag and other info
              ucp_tag_t comm_tag = 0LLU;
              // MSB 32-bit for original MPI TAG
              comm_tag |= ((ucp_tag_t)i << 32);
              // 16-32 bits are source rank
              comm_tag |= ((ucp_tag_t)(src_rank & 0x0000FFFF) << 16);
              // The 0-15 bits are source L-socket(worker)
              comm_tag |= (((ucp_tag_t)(affinity_list_[src_rank][i])) & 0x000000000000FFFF);

              recv_reqs_[i] = ucp_tag_recv_nb(ucp_worker_[affinity_list_[rank_][i]],
                                              recv_buffer_[i], sizeof(data_t_) * dst_len,
                                              ucp_dt_make_contig(sizeof(char)), comm_tag,
                                              (ucp_tag_t)-1, empty_recv_callback_func);

              // The same as send, but recv API never return UCS_OK, only UCS_ERR_xx or valid
              // pointer can be returned
              if (UCS_PTR_IS_ERR(recv_reqs_[i])) {
                recv_reqs_[i] = nullptr;
              }
            }
          }

          // for all stage except last stage, wait for UCX communication to finish
          if (stage < num_proc_ - 1) {
            // Wait for all send to finish
            for (int i = 0; i < num_local_gpu_; i++) {
              // If the current operation is not completed yet, progress it
              while (send_reqs_[i] != nullptr &&
                     ucp_request_check_status(send_reqs_[i]) == UCS_INPROGRESS) {
                for (int j = 0; j < socket_num_; j++) {
                  ucp_worker_progress(ucp_worker_[j]);
                }
              }
            }

            // Wait for all receive to finish
            for (int i = 0; i < num_local_gpu_; i++) {
              // If the current operation is not completed yet, progress it
              while (recv_reqs_[i] != nullptr &&
                     ucp_request_check_status(recv_reqs_[i]) == UCS_INPROGRESS) {
                for (int j = 0; j < socket_num_; j++) {
                  ucp_worker_progress(ucp_worker_[j]);
                }
              }
            }

            // Da-allocate UCP request before going to next round
            for (int i = 0; i < num_local_gpu_; i++) {
              if (send_reqs_[i] != nullptr) {
                ucp_request_free(send_reqs_[i]);
                send_reqs_[i] = nullptr;
              }
              if (recv_reqs_[i] != nullptr) {
                ucp_request_free(recv_reqs_[i]);
                recv_reqs_[i] = nullptr;
              }
            }

            // MPI_Waitall(2 * num_local_gpu_, request_, MPI_STATUSES_IGNORE);
          }
        }
      }

      // Swap recv_buffer and local_buffer pointer. If there is odd nodes, do not swap in the last
      // stage
      if (num_proc_ % 2 != 0 && stage == num_proc_ - 1) {
        continue;
      }
      recv_buffer_.swap(local_buffer_);
    }  // stage loop
  }

  // Free the buffers allocated in Initialize(); the object can then be
  // re-initialized for the next multi-node all2all.
  void reset() {
    // Device restorer
    CudaDeviceContext context;

    // Free local_buffer and recv_buffer, ready for next multi-node all2all
    for (int i = 0; i < num_local_gpu_; i++) {
      // Free temp buffers on each GPU
      context.set_device(GPU_list_[i]);
      CK_CUDA_THROW_(cudaFree(local_buffer_[i]));
      CK_CUDA_THROW_(cudaFree(recv_buffer_[i]));
    }

    // Free gossip all2all temp buffers
    for (int i = 0; i < num_local_gpu_; i++) {
      context.set_device(GPU_list_[i]);
      CK_CUDA_THROW_(cudaFree(temp_buf_[i]));
    }
  }

 private:
  // GPU list
  std::vector<gpu_id_t> GPU_list_;
  // GPU count
  gpu_id_t num_local_gpu_;
  gpu_id_t num_total_gpu_;
  // MPI-related resource
  int rank_;
  int num_proc_;
  MPI_Comm comm_;
  // MPI_Request * request_;
  // Local gossip all2all library, one handle per stage
  std::vector<FasterGossipComm *> GossipCommHandle_;
  // Temp local GPU buffers for remote data (ping-pong pair swapped per stage)
  std::vector<data_t_ *> local_buffer_;
  std::vector<data_t_ *> recv_buffer_;
  // Temp local GPU buffers for local all2all
  std::vector<data_t_ *> temp_buf_;
  // Buffers and tables provided by users
  std::vector<data_t_ *> src_;
  std::vector<data_t_ *> dst_;
  std::vector<std::vector<size_t>> send_table_;
  std::vector<std::vector<size_t>> recv_table_;
  // Temp table for local all2all
  std::vector<std::vector<size_t>> temp_table_;
  // Temp src and dst pinter vector for local all2all
  std::vector<data_t_ *> temp_src_;
  std::vector<data_t_ *> temp_dst_;
  // Socket count
  int socket_num_;
  // UCP variable: UCP context, UCP worker, UCP address, UCP EP and UCP request
  std::vector<ucp_context_h> ucp_context_;
  std::vector<ucp_worker_h> ucp_worker_;
  std::vector<ucp_address_t *> ucp_worker_address_;
  std::vector<ucp_address_t *> ucp_worker_address_book_;
  std::vector<std::vector<ucp_ep_h>> ucp_endpoints_;
  std::vector<ucs_status_ptr_t> send_reqs_;
  std::vector<ucs_status_ptr_t> recv_reqs_;
  // HWLOC variable: topo
  hwloc_topology_t topo_;
  // The buffers that record the locality of each GPU in GPU list on each nodes
  std::vector<gpu_id_t *> affinity_list_;
};  // class

}  // namespace GossipComm
}  // namespace HugeCTR
par_relax_more.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * a few more relaxation schemes: Chebychev, FCF-Jacobi, CG -
 * these do not go through the CF interface (hypre_BoomerAMGRelaxIF)
 *
 *****************************************************************************/

#include "_hypre_parcsr_ls.h"
#include "float.h"

HYPRE_Int hypre_LINPACKcgtql1(HYPRE_Int*,HYPRE_Real *,HYPRE_Real *,HYPRE_Int *);
HYPRE_Real hypre_LINPACKcgpthy(HYPRE_Real*, HYPRE_Real*);

/******************************************************************************
 *
 * use max norm to estimate largest eigenvalue
 *
 *****************************************************************************/
HYPRE_Int hypre_ParCSRMaxEigEstimate(hypre_ParCSRMatrix *A, /* matrix to relax with */
                                     HYPRE_Int scale, /* scale by diagonal?*/
                                     HYPRE_Real *max_eig)
{
   HYPRE_Real e_max;
   HYPRE_Real row_sum, max_norm;
   HYPRE_Real *A_diag_data;
   HYPRE_Real *A_offd_data;
   HYPRE_Real temp;
   HYPRE_Real diag_value;
   HYPRE_Int pos_diag, neg_diag;
   HYPRE_Int A_num_rows;
   HYPRE_Int *A_diag_i;
   HYPRE_Int *A_offd_i;
   HYPRE_Int j;
   HYPRE_Int i, start;

   /* estimate with the inf-norm of A - should be ok for SPD matrices */
   A_num_rows  = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   A_diag_i    = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A));
   A_diag_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A));
   A_offd_i    = hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(A));
   A_offd_data = hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(A));

   max_norm = 0.0;
   pos_diag = neg_diag = 0;

   for ( i = 0; i < A_num_rows; i++ )
   {
      /* the first entry of each diag row is taken as the diagonal
       * (hypre CSR convention: diagonal stored first) */
      start = A_diag_i[i];
      diag_value = A_diag_data[start];
      if (diag_value > 0)
      {
         pos_diag++;
      }
      if (diag_value < 0)
      {
         neg_diag++;
         diag_value = -diag_value;
      }
      row_sum = diag_value;
      /*for (j = 0; j < row_length; j++)*/
      for (j = start+1; j < A_diag_i[i+1]; j++)
      {
         row_sum += fabs(A_diag_data[j]);
      }
      for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
      {
         row_sum += fabs(A_offd_data[j]);
      }
      if (scale)
      {
         /* row sum of D^{-1}A (in magnitude) */
         if (diag_value != 0.0)
            row_sum = row_sum/diag_value;
      }
      if ( row_sum > max_norm ) max_norm = row_sum;
   }

   /* get max across procs */
   hypre_MPI_Allreduce(&max_norm, &temp, 1, HYPRE_MPI_REAL, hypre_MPI_MAX,
                       hypre_ParCSRMatrixComm(A));
   max_norm = temp;

   /* from Charles: flip the sign when all diagonals are negative */
   if ( pos_diag == 0 && neg_diag > 0 ) max_norm = - max_norm;

   /* eig estimates */
   e_max = max_norm;

   /* return */
   *max_eig = e_max;

   return hypre_error_flag;
}

/******************************************************************************
   use CG to get the eigenvalue estimate
   scale means get eig est of (D^{-1/2} A D^{-1/2}
******************************************************************************/
HYPRE_Int hypre_ParCSRMaxEigEstimateCG( hypre_ParCSRMatrix *A, /* matrix to relax with */
                                        HYPRE_Int scale, /* scale by diagonal?*/
                                        HYPRE_Int max_iter,
                                        HYPRE_Real *max_eig,
                                        HYPRE_Real *min_eig )
{
   HYPRE_Int i, j, err;
   hypre_ParVector *p;
   hypre_ParVector *s;
   hypre_ParVector *r;
   hypre_ParVector *ds;
   hypre_ParVector *u;

   HYPRE_Real *tridiag = NULL;
   HYPRE_Real *trioffd = NULL;

   HYPRE_Real lambda_max ;
   HYPRE_Real beta, gamma = 0.0, alpha, sdotp, gamma_old, alphainv;
   HYPRE_Real diag;
   HYPRE_Real lambda_min;
   HYPRE_Real *s_data, *p_data, *ds_data, *u_data;
   HYPRE_Int local_size = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);

   /* check the size of A - don't iterate more than the size */
   HYPRE_BigInt size = hypre_ParCSRMatrixGlobalNumRows(A);
   if (size < (HYPRE_BigInt) max_iter)
   {
      max_iter = (HYPRE_Int) size;
   }

   /* create some temp vectors: p, s, r , ds, u*/
   r = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumRows(A),
                             hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(r);
   hypre_ParVectorSetPartitioningOwner(r,0);

   p = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumRows(A),
                             hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(p);
   hypre_ParVectorSetPartitioningOwner(p,0);

   s = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumRows(A),
                             hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(s);
   hypre_ParVectorSetPartitioningOwner(s,0);

   ds = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                              hypre_ParCSRMatrixGlobalNumRows(A),
                              hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(ds);
   hypre_ParVectorSetPartitioningOwner(ds,0);

   u = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumRows(A),
                             hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(u);
   hypre_ParVectorSetPartitioningOwner(u,0);

   /* point to local data */
   s_data = hypre_VectorData(hypre_ParVectorLocalVector(s));
   p_data = hypre_VectorData(hypre_ParVectorLocalVector(p));
   ds_data = hypre_VectorData(hypre_ParVectorLocalVector(ds));
   u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));

   /* make room for tri-diag matrix */
   tridiag = hypre_CTAlloc(HYPRE_Real, max_iter+1, HYPRE_MEMORY_HOST);
   trioffd = hypre_CTAlloc(HYPRE_Real, max_iter+1, HYPRE_MEMORY_HOST);
   for (i=0; i < max_iter + 1; i++)
   {
      tridiag[i] = 0;
      trioffd[i] = 0;
   }

   /* set residual to random */
   hypre_ParVectorSetRandomValues(r,1);

   if (scale)
   {
      /* ds = D^{-1/2}; reads the first entry of each diag row as the
       * diagonal (hypre CSR convention) */
      for (i = 0; i < local_size; i++)
      {
         diag = A_diag_data[A_diag_i[i]];
         ds_data[i] = 1/sqrt(diag);
      }
   }
   else
   {
      /* set ds to 1 */
      hypre_ParVectorSetConstantValues(ds,1.0);
   }

   /* gamma = <r,Cr> */
   /* NOTE(review): p is still zero here, so this initial gamma is 0; it is
    * recomputed as <r,s> on the first pass and beta is forced to 1.0 at
    * i==0, so gamma_old is never used uninitialized. */
   gamma = hypre_ParVectorInnerProd(r,p);

   /* for the initial filling of the tridiag matrix */
   beta = 1.0;

   i = 0;
   while (i < max_iter)
   {
      /* s = C*r */
      /* TO DO:  C = diag scale */
      hypre_ParVectorCopy(r, s);

      /*gamma = <r,Cr> */
      gamma_old = gamma;
      gamma = hypre_ParVectorInnerProd(r,s);

      if (i==0)
      {
         beta = 1.0;
         /* p_0 = C*r */
         hypre_ParVectorCopy(s, p);
      }
      else
      {
         /* beta = gamma / gamma_old */
         beta = gamma / gamma_old;

         /* p = s + beta p */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for (j=0; j < local_size; j++)
         {
            p_data[j] = s_data[j] + beta*p_data[j];
         }
      }

      if (scale)
      {
         /* s = D^{-1/2}A*D^{-1/2}*p */
         for (j = 0; j < local_size; j++)
         {
            u_data[j] = ds_data[j] * p_data[j];
         }
         hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, s);
         for (j = 0; j < local_size; j++)
         {
            s_data[j] = ds_data[j] * s_data[j];
         }
      }
      else
      {
         /* s = A*p */
         hypre_ParCSRMatrixMatvec(1.0, A, p, 0.0, s);
      }

      /* <s,p> */
      sdotp =  hypre_ParVectorInnerProd(s,p);

      /* alpha = gamma / <s,p> */
      alpha = gamma/sdotp;

      /* get tridiagonal matrix: Lanczos coefficients from the CG recurrence */
      alphainv = 1.0/alpha;

      tridiag[i+1] = alphainv;
      tridiag[i] *= beta;
      tridiag[i] += alphainv;

      trioffd[i+1] = alphainv;
      trioffd[i] *= sqrt(beta);

      /* x = x + alpha*p */
      /* don't need */

      /* r = r - alpha*s */
      hypre_ParVectorAxpy( -alpha, s, r);

      i++;
   }

   /* eispack routine - eigenvalues return in tridiag and ordered*/
   hypre_LINPACKcgtql1(&i,tridiag,trioffd,&err);

   lambda_max = tridiag[i-1];
   lambda_min = tridiag[0];
   /* hypre_printf("linpack max eig est = %g\n", lambda_max);*/
   /* hypre_printf("linpack min eig est = %g\n", lambda_min);*/

   hypre_TFree(tridiag, HYPRE_MEMORY_HOST);
   hypre_TFree(trioffd, HYPRE_MEMORY_HOST);

   hypre_ParVectorDestroy(r);
   hypre_ParVectorDestroy(s);
   hypre_ParVectorDestroy(p);
   hypre_ParVectorDestroy(ds);
   hypre_ParVectorDestroy(u);

   /* return */
   *max_eig = lambda_max;
   *min_eig = lambda_min;

   return hypre_error_flag;
}

/******************************************************************************
Chebyshev relaxation

Can specify order 1-4 (this is the order of the resid polynomial)- here we
explicitly code the coefficients (instead of iteratively determining)

variant 0: standard chebyshev
this is rlx 11 if scale = 0, and 16 if scale == 1

variant 1: modified cheby: T(t)* f(t) where f(t) =
(1-b/t)
this is rlx 15 if scale = 0, and 17 if scale == 1

ratio indicates the percentage of the whole spectrum to use (so .5
means half, and .1 means 10percent)
*******************************************************************************/

/* One pass of polynomial (Chebyshev) relaxation: u <- u + p(A) (f - A u),
 * with p of order 1..4 whose coefficients are hard-coded from theta/delta
 * (midpoint/half-width of the targeted spectral interval).  When scale != 0
 * the polynomial is applied to the symmetrically scaled operator
 * D^{-1/2} A D^{-1/2}.  v and r are caller-provided scratch vectors. */
HYPRE_Int hypre_ParCSRRelax_Cheby( hypre_ParCSRMatrix *A, /* matrix to relax with */
                                   hypre_ParVector *f, /* right-hand side */
                                   HYPRE_Real max_eig,
                                   HYPRE_Real min_eig,
                                   HYPRE_Real fraction,
                                   HYPRE_Int order, /* polynomial order */
                                   HYPRE_Int scale, /* scale by diagonal?*/
                                   HYPRE_Int variant,
                                   hypre_ParVector *u, /* initial/updated approximation */
                                   hypre_ParVector *v, /* temporary vector */
                                   hypre_ParVector *r /*another temp vector */ )
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);

   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);

   HYPRE_Real *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
   HYPRE_Real *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f));
   HYPRE_Real *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v));
   HYPRE_Real *r_data = hypre_VectorData(hypre_ParVectorLocalVector(r));

   HYPRE_Real theta, delta;

   HYPRE_Real den;
   HYPRE_Real upper_bound, lower_bound;

   HYPRE_Int i, j;
   HYPRE_Real coefs[5];
   HYPRE_Real mult;
   HYPRE_Real *orig_u;

   HYPRE_Real tmp_d;

   HYPRE_Int cheby_order;

   HYPRE_Real *ds_data, *tmp_data;
   HYPRE_Real diag;

   hypre_ParVector *ds;
   hypre_ParVector *tmp_vec;

   /* u = u + p(A)r */

   /* clamp the supported polynomial order to [1,4] */
   if (order > 4)
   {
      order = 4;
   }
   else if (order < 1)
   {
      order = 1;
   }

   /* we are using the order of p(A) */
   cheby_order = order -1;

   /* make sure we are large enough - Adams et al. 2003 */
   upper_bound = max_eig * 1.1;
   /* lower_bound = max_eig/fraction; */
   lower_bound = (upper_bound - min_eig)* fraction + min_eig;

   /* theta and delta */
   theta = (upper_bound + lower_bound)/2;
   delta = (upper_bound - lower_bound)/2;

   if (variant == 1 )
   {
      switch ( cheby_order ) /* these are the corresponding cheby
                                polynomials: u = u_o + s(A)r_0 - so order is
                                one less than resid poly: r(t) = 1 - t*s(t) */
      {
         case 0:
            coefs[0] = 1.0/theta;
            break;

         case 1: /* (del - t + 2*th)/(th^2 + del*th) */
            den = (theta*theta + delta*theta);
            coefs[0] = (delta + 2*theta)/den;
            coefs[1] = -1.0/den;
            break;

         case 2: /* (4*del*th - del^2 - t*(2*del + 6*th) + 2*t^2 + 6*th^2)/(2*del*th^2 - del^2*th - del^3 + 2*th^3)*/
            den = 2*delta*theta*theta - delta*delta*theta - pow(delta,3) + 2*pow(theta,3);
            coefs[0] = (4*delta*theta - pow(delta,2) + 6*pow(theta,2))/den;
            coefs[1] = -(2*delta + 6*theta)/den;
            coefs[2] = 2/den;
            break;

         case 3: /* -(6*del^2*th - 12*del*th^2 - t^2*(4*del + 16*th) + t*(12*del*th - 3*del^2 + 24*th^2) + 3*del^3 + 4*t^3 - 16*th^3)/(4*del*th^3 - 3*del^2*th^2 - 3*del^3*th + 4*th^4)*/
            den = - (4*delta*pow(theta,3) - 3*pow(delta,2)*pow(theta,2) - 3*pow(delta,3)*theta + 4*pow(theta,4) );
            coefs[0] = (6*pow(delta,2)*theta - 12*delta*pow(theta,2) + 3*pow(delta,3) - 16*pow(theta,3) )/den;
            coefs[1] = (12*delta*theta - 3*pow(delta,2) + 24*pow(theta,2))/den;
            coefs[2] = -( 4*delta + 16*theta)/den;
            coefs[3] = 4/den;
            break;
      }
   }
   else /* standard chebyshev */
   {
      switch ( cheby_order ) /* these are the corresponding cheby
                                polynomials: u = u_o + s(A)r_0 - so order is
                                one less than resid poly: r(t) = 1 - t*s(t) */
      {
         case 0:
            coefs[0] = 1.0/theta;
            break;

         case 1: /* ( 2*t - 4*th)/(del^2 - 2*th^2) */
            den = delta*delta - 2*theta*theta;
            coefs[0] = -4*theta/den;
            coefs[1] = 2/den;
            break;

         case 2: /* (3*del^2 - 4*t^2 + 12*t*th - 12*th^2)/(3*del^2*th - 4*th^3)*/
            den = 3*(delta*delta)*theta - 4*(theta*theta*theta);
            coefs[0] = (3*delta*delta - 12 *theta*theta)/den;
            coefs[1] = 12*theta/den;
            coefs[2] = -4/den;
            break;

         case 3: /*(t*(8*del^2 - 48*th^2) - 16*del^2*th + 32*t^2*th - 8*t^3 + 32*th^3)/(del^4 - 8*del^2*th^2 + 8*th^4)*/
            den = pow(delta,4) - 8*delta*delta*theta*theta + 8*pow(theta,4);
            coefs[0] = (32*pow(theta,3)- 16*delta*delta*theta)/den;
            coefs[1] = (8*delta*delta - 48*theta*theta)/den;
            coefs[2] = 32*theta/den;
            coefs[3] = -8/den;
            break;
      }
   }

   orig_u = hypre_CTAlloc(HYPRE_Real, num_rows, HYPRE_MEMORY_HOST);

   if (!scale)
   {
      /* get residual: r = f - A*u */
      hypre_ParVectorCopy(f, r);
      hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, r);

      /* Horner-style evaluation of p(A)r: seed with the highest-order
         coefficient, then repeatedly u <- coefs[i]*r + A*u */
      for ( i = 0; i < num_rows; i++ )
      {
         orig_u[i] = u_data[i];
         u_data[i] = r_data[i] * coefs[cheby_order];
      }
      for (i = cheby_order - 1; i >= 0; i-- )
      {
         hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, v);
         mult = coefs[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for ( j = 0; j < num_rows; j++ )
         {
            u_data[j] = mult * r_data[j] + v_data[j];
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for ( i = 0; i < num_rows; i++ )
      {
         u_data[i] = orig_u[i] + u_data[i];
      }
   }
   else /* scaling! */
   {
      /*grab 1/sqrt(diagonal) */
      ds = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                 hypre_ParCSRMatrixGlobalNumRows(A),
                                 hypre_ParCSRMatrixRowStarts(A));
      hypre_ParVectorInitialize(ds);
      hypre_ParVectorSetPartitioningOwner(ds,0);
      ds_data = hypre_VectorData(hypre_ParVectorLocalVector(ds));

      tmp_vec = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                      hypre_ParCSRMatrixGlobalNumRows(A),
                                      hypre_ParCSRMatrixRowStarts(A));
      hypre_ParVectorInitialize(tmp_vec);
      hypre_ParVectorSetPartitioningOwner(tmp_vec,0);
      tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(tmp_vec));

      /* get ds_data and get scaled residual: r = D^(-1/2)f -
       * D^(-1/2)A*u */

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j,diag) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < num_rows; j++)
      {
         diag = A_diag_data[A_diag_i[j]];
         ds_data[j] = 1/sqrt(diag);
         r_data[j] = ds_data[j] * f_data[j];
      }
      hypre_ParCSRMatrixMatvec(-1.0, A, u, 0.0, tmp_vec);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for ( j = 0; j < num_rows; j++ )
      {
         r_data[j] += ds_data[j] * tmp_data[j];
      }

      /* save original u, then start
         the iteration by multiplying r by the cheby coef.*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for ( j = 0; j < num_rows; j++ )
      {
         orig_u[j] = u_data[j]; /* orig, unscaled u */
         u_data[j] = r_data[j] * coefs[cheby_order];
      }

      /* now do the other coefficients */
      for (i = cheby_order - 1; i >= 0; i-- )
      {
         /* v = D^(-1/2)AD^(-1/2)u */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for ( j = 0; j < num_rows; j++ )
         {
            tmp_data[j] = ds_data[j] * u_data[j];
         }
         hypre_ParCSRMatrixMatvec(1.0, A, tmp_vec, 0.0, v);

         /* u_new = coef*r + v*/
         mult = coefs[i];

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j,tmp_d) HYPRE_SMP_SCHEDULE
#endif
         for ( j = 0; j < num_rows; j++ )
         {
            tmp_d = ds_data[j]* v_data[j];
            u_data[j] = mult * r_data[j] + tmp_d;
         }

      } /* end of cheby_order loop */

      /* now we have to scale u_data before adding it to u_orig*/

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for ( j = 0; j < num_rows; j++ )
      {
         u_data[j] = orig_u[j] + ds_data[j]*u_data[j];
      }

      hypre_ParVectorDestroy(ds);
      hypre_ParVectorDestroy(tmp_vec);
   } /* end of scaling code */

   hypre_TFree(orig_u, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * CG Smoother
 *--------------------------------------------------------------------------*/

/* Run a fixed number of PCG iterations as a smoother: tolerance is set to
 * zero so the solver always performs exactly num_its iterations.  The PCG
 * solver object must already be set up by the caller. */
HYPRE_Int hypre_ParCSRRelax_CG( HYPRE_Solver solver,
                                hypre_ParCSRMatrix *A,
                                hypre_ParVector *f,
                                hypre_ParVector *u,
                                HYPRE_Int num_its)
{
   HYPRE_PCGSetMaxIter(solver, num_its); /* max iterations */
   HYPRE_PCGSetTol(solver, 0.0); /* zero tolerance: always run all num_its iterations */
   HYPRE_ParCSRPCGSolve(solver, (HYPRE_ParCSRMatrix)A, (HYPRE_ParVector)f, (HYPRE_ParVector)u);

#if 0
   {
      HYPRE_Int myid;
      HYPRE_Int num_iterations;
      HYPRE_Real final_res_norm;

      hypre_MPI_Comm_rank(hypre_MPI_COMM_WORLD, &myid);
      HYPRE_PCGGetNumIterations(solver, &num_iterations);
      HYPRE_PCGGetFinalRelativeResidualNorm(solver, &final_res_norm);
      if (myid ==0)
      {
         hypre_printf(" -----CG PCG Iterations = %d\n", num_iterations);
         hypre_printf(" -----CG PCG Final Relative Residual Norm = %e\n", final_res_norm);
      }
   }
#endif

   return hypre_error_flag;
}

/* tql1.f --

   this is the eispack translation - from Barry Smith in Petsc

   Note that this routine always uses real numbers (not complex) even if the
   underlying matrix is Hermitian. This is because the Lanczos process
   applied to Hermitian matrices always produces a real, symmetric
   tridiagonal matrix. 
*/

/* QL eigenvalue iteration for a symmetric tridiagonal matrix (f2c
 * translation of EISPACK tql1; see the original documentation block below).
 * On entry d[1..n] holds the diagonal, e[2..n] the subdiagonal (Fortran
 * 1-based after the --d/--e pointer shifts).  On exit d holds the
 * eigenvalues in ascending order; e is destroyed; *ierr is 0 on success or
 * the index of an eigenvalue that failed to converge in 30 iterations.
 * Do not restructure: the goto-based control flow mirrors the validated
 * Fortran original. */
HYPRE_Int hypre_LINPACKcgtql1(HYPRE_Int *n,HYPRE_Real *d,HYPRE_Real *e,HYPRE_Int *ierr)
{
    /* System generated locals */
    HYPRE_Int i__1,i__2;
    HYPRE_Real d__1,d__2,c_b10 = 1.0;

    /* Local variables */
    HYPRE_Real c,f,g,h;
    HYPRE_Int i,j,l,m;
    HYPRE_Real p,r,s,c2,c3 = 0.0;
    HYPRE_Int l1,l2;
    HYPRE_Real s2 = 0.0;
    HYPRE_Int ii;
    HYPRE_Real dl1,el1;
    HYPRE_Int mml;
    HYPRE_Real tst1,tst2;

    /*     THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TQL1, */
    /*     NUM. MATH. 11, 293-306(1968) BY BOWDLER, MARTIN, REINSCH, AND */
    /*     WILKINSON. */
    /*     HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 227-240(1971). */

    /*     THIS SUBROUTINE FINDS THE EIGENVALUES OF A SYMMETRIC */
    /*     TRIDIAGONAL MATRIX BY THE QL METHOD. */

    /*     ON INPUT */

    /*        N IS THE ORDER OF THE MATRIX. */

    /*        D CONTAINS THE DIAGONAL ELEMENTS OF THE INPUT MATRIX. */

    /*        E CONTAINS THE SUBDIAGONAL ELEMENTS OF THE INPUT MATRIX */
    /*          IN ITS LAST N-1 POSITIONS.  E(1) IS ARBITRARY. */

    /*      ON OUTPUT */

    /*        D CONTAINS THE EIGENVALUES IN ASCENDING ORDER.  IF AN */
    /*          ERROR EXIT IS MADE, THE EIGENVALUES ARE CORRECT AND */
    /*          ORDERED FOR INDICES 1,2,...IERR-1, BUT MAY NOT BE */
    /*          THE SMALLEST EIGENVALUES. */

    /*        E HAS BEEN DESTROYED. */

    /*        IERR IS SET TO */
    /*          ZERO       FOR NORMAL RETURN, */
    /*          J          IF THE J-TH EIGENVALUE HAS NOT BEEN */
    /*                     DETERMINED AFTER 30 ITERATIONS. */

    /*     CALLS CGPTHY FOR  DSQRT(A*A + B*B) . */

    /*     QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, */
    /*     MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY */

    /*     THIS VERSION DATED AUGUST 1983. */

    /*     ------------------------------------------------------------------ */
    HYPRE_Real ds;

    /* shift to Fortran-style 1-based indexing */
    --e;
    --d;

    *ierr = 0;
    if (*n == 1) {
        goto L1001;
    }
    i__1 = *n;
    for (i = 2; i <= i__1; ++i) {
        e[i - 1] = e[i];
    }
    f = 0.;
    tst1 = 0.;
    e[*n] = 0.;
    i__1 = *n;
    for (l = 1; l <= i__1; ++l) {
        j = 0;
        h = (d__1 = d[l],fabs(d__1)) + (d__2 = e[l],fabs(d__2));
        if (tst1 < h) {
            tst1 = h;
        }
        /* .......... LOOK FOR SMALL SUB-DIAGONAL ELEMENT .......... */
        i__2 = *n;
        for (m = l; m <= i__2; ++m) {
            tst2 = tst1 + (d__1 = e[m],fabs(d__1));
            if (tst2 == tst1) {
                goto L120;
            }
            /* .......... E(N) IS ALWAYS ZERO,SO THERE IS NO EXIT */
            /*            THROUGH THE BOTTOM OF THE LOOP .......... */
        }
L120:
        if (m == l) {
            goto L210;
        }
L130:
        if (j == 30) {
            goto L1000;
        }
        ++j;
        /* .......... FORM SHIFT .......... */
        l1 = l + 1;
        l2 = l1 + 1;
        g = d[l];
        p = (d[l1] - g) / (e[l] * 2.);
        r = hypre_LINPACKcgpthy(&p,&c_b10);
        ds = 1.0;
        if (p < 0.0) ds = -1.0;
        d[l] = e[l] / (p + ds*r);
        d[l1] = e[l] * (p + ds*r);
        dl1 = d[l1];
        h = g - d[l];
        if (l2 > *n) {
            goto L145;
        }
        i__2 = *n;
        for (i = l2; i <= i__2; ++i) {
            d[i] -= h;
        }
L145:
        f += h;
        /* .......... QL TRANSFORMATION .......... */
        p = d[m];
        c = 1.;
        c2 = c;
        el1 = e[l1];
        s = 0.;
        mml = m - l;
        /* .......... FOR I=M-1 STEP -1 UNTIL L DO -- .......... */
        i__2 = mml;
        for (ii = 1; ii <= i__2; ++ii) {
            c3 = c2;
            c2 = c;
            s2 = s;
            i = m - ii;
            g = c * e[i];
            h = c * p;
            r = hypre_LINPACKcgpthy(&p,&e[i]);
            e[i + 1] = s * r;
            s = e[i] / r;
            c = p / r;
            p = c * d[i] - s * g;
            d[i + 1] = h + s * (c * g + s * d[i]);
        }
        p = -s * s2 * c3 * el1 * e[l] / dl1;
        e[l] = s * p;
        d[l] = c * p;
        tst2 = tst1 + (d__1 = e[l],fabs(d__1));
        if (tst2 > tst1) {
            goto L130;
        }
L210:
        p = d[l] + f;
        /* .......... ORDER EIGENVALUES .......... */
        if (l == 1) {
            goto L250;
        }
        /* .......... FOR I=L STEP -1 UNTIL 2 DO -- .......... */
        i__2 = l;
        for (ii = 2; ii <= i__2; ++ii) {
            i = l + 2 - ii;
            if (p >= d[i - 1]) {
                goto L270;
            }
            d[i] = d[i - 1];
        }
L250:
        i = 1;
L270:
        d[i] = p;
    }
    goto L1001;
    /* .......... SET ERROR -- NO CONVERGENCE TO AN */
    /*            EIGENVALUE AFTER 30 ITERATIONS .......... */
L1000:
    *ierr = l;
L1001:
    return 0;
} /* cgtql1_ */

/* sqrt(a*a + b*b) computed without overflow or destructive underflow
 * (f2c translation of EISPACK pythag/cgpthy; see comment in the body).
 * Iterates until the correction term underflows to zero. */
HYPRE_Real hypre_LINPACKcgpthy(HYPRE_Real *a,HYPRE_Real *b)
{
    /* System generated locals */
    HYPRE_Real ret_val,d__1,d__2,d__3;

    /* Local variables */
    HYPRE_Real p,r,s,t,u;

    /*     FINDS DSQRT(A**2+B**2) WITHOUT OVERFLOW OR DESTRUCTIVE UNDERFLOW */

    /* Computing MAX */
    d__1 = fabs(*a),d__2 = fabs(*b);
    p = hypre_max(d__1,d__2);
    if (!p) {
        goto L20;
    }
    /* Computing MIN */
    d__2 = fabs(*a),d__3 = fabs(*b);
    /* Computing 2nd power */
    d__1 = hypre_min(d__2,d__3) / p;
    r = d__1 * d__1;
L10:
    t = r + 4.;
    if (t == 4.) {
        goto L20;
    }
    s = r / t;
    u = s * 2. + 1.;
    p = u * p;
    /* Computing 2nd power */
    d__1 = s / u;
    r = d__1 * d__1 * r;
    goto L10;
L20:
    ret_val = p;

    return ret_val;
} /* cgpthy_ */
DiracMatrix.h
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by:
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////

#ifndef QMCPLUSPLUS_DIRAC_MATRIX_H
#define QMCPLUSPLUS_DIRAC_MATRIX_H

#include "Numerics/Blasf.h"
#include <OhmmsPETE/OhmmsMatrix.h>
#include <type_traits/scalar_traits.h>

namespace qmcplusplus {

/** LU factorization (float), thin wrapper over LAPACK sgetrf;
 * the Fortran status/info argument is discarded */
inline void Xgetrf(int n, int m, float* restrict a, int lda, int* restrict piv)
{
  int status;
  sgetrf(n,m,a,lda,piv,status);
}

/** inversion of a float matrix after LU factorization (LAPACK sgetri) */
inline void Xgetri(int n, float* restrict a, int lda, int* restrict piv, float* restrict work, int& lwork)
{
  int status;
  sgetri(n,a,lda,piv,work,lwork,status);
}

/** LU factorization (complex<float>), wrapper over LAPACK cgetrf */
inline void Xgetrf(int n, int m, std::complex<float>* restrict a, int lda, int* restrict piv)
{
  int status;
  cgetrf(n,m,a,lda,piv,status);
}

/** inversion of a float matrix after lu factorization*/
inline void Xgetri(int n, std::complex<float>* restrict a, int lda, int* restrict piv,
                   std::complex<float>* restrict work, int& lwork)
{
  int status;
  cgetri(n,a,lda,piv,work,lwork,status);
}

/** LU factorization (double), wrapper over LAPACK dgetrf */
inline void Xgetrf(int n, int m, double* restrict a, int lda, int* restrict piv)
{
  int status;
  dgetrf(n,m,a,lda,piv,status);
}

/** inversion of a double matrix after LU factorization (LAPACK dgetri) */
inline void Xgetri(int n, double* restrict a, int lda, int* restrict piv, double* restrict work, int& lwork)
{
  int status;
  dgetri(n,a,lda,piv,work,lwork,status);
}

/** LU factorization (complex<double>), wrapper over LAPACK zgetrf */
inline void Xgetrf(int n, int m, std::complex<double>* restrict a, int lda, int* restrict piv)
{
  int status;
  zgetrf(n,m,a,lda,piv,status);
}

/** inversion of a std::complex<double> matrix after lu factorization*/
inline void Xgetri(int n, std::complex<double>* restrict a, int lda, int* restrict piv,
                   std::complex<double>* restrict work, int& lwork)
{
  int status;
  zgetri(n,a,lda,piv,work,lwork,status);
}

/** out-of-place transpose of an n x n matrix with leading dimension lda;
 * in and out must not alias (both are restrict-qualified).
 * Note: name keeps the historical "Tanspose" spelling; callers depend on it. */
template<typename TIN, typename TOUT>
inline void TansposeSquare(const TIN* restrict in, TOUT* restrict out, size_t n, size_t lda)
{
#pragma omp simd
  for(size_t i=0; i<n; ++i)
    for(size_t j=0; j<n; ++j)
      out[i*lda+j]=in[i+j*lda];
}

/** log|det| of a real LU-factored matrix X (diagonal of U plus the pivot
 * permutation sign); phase is 0 for a positive determinant, pi for negative */
template<typename T>
inline T computeLogDet(const T* restrict X, int n, int lda, const int* restrict pivot, T& phase)
{
  T logdet(0);
  int sign_det=1;
  for(size_t i=0; i<n; i++)
  {
    const size_t ii=i*lda+i;
    /* each row swap recorded in pivot flips the determinant sign */
    sign_det *= (pivot[i] == i+1)?1:-1;
    sign_det *= (X[ii]>0)?1:-1;
    logdet += std::log(std::abs(X[ii]));
  }
  phase=(sign_det>0)? T(0):M_PI;
  return logdet;
}

/** log|det| of a complex LU-factored matrix X; phase accumulates the
 * arguments of the diagonal entries (plus pi per pivot swap), reduced
 * into [0, 2*pi) */
template<typename T>
inline T computeLogDet(const std::complex<T>* restrict X, int n, int lda, const int* restrict pivot, T& phase)
{
  T logdet(0);
  phase=T(0);
  for(size_t i=0; i<n; i++)
  {
    const size_t ii=i*lda+i;
    phase += std::arg(X[ii]);
    if(pivot[i]!=i+1) phase += M_PI;
    logdet+=std::log(X[ii].real()*X[ii].real()+X[ii].imag()*X[ii].imag());
    //slightly smaller error with the following
    //  logdet+=2.0*std::log(std::abs(x[ii]);
  }
  constexpr T one_over_2pi=T(1)/TWOPI;
  phase -= std::floor(phase*one_over_2pi)*TWOPI;
  return 0.5*logdet;
}

/** helper class that owns the LAPACK workspace/pivot buffers and inverts
 * a dense matrix in place, optionally computing log|det| and its phase */
template<typename T>
struct DiracMatrix
{
  typedef typename scalar_traits<T>::real_type real_type;
  aligned_vector<T> m_work;    // LAPACK getri workspace
  aligned_vector<int> m_pivot; // LU pivot indices
  int Lwork;                   // size of m_work; 0 until first reset()
  real_type LogDet;            // log|det| from the last invert(..., true)
  real_type Phase;             // determinant phase from the last invert(..., true)

  DiracMatrix():Lwork(0) {}

  /** invert amat in place via getrf + getri; when computeDet is true,
   * LogDet/Phase are filled from the LU diagonal before the inversion */
  inline void invert(Matrix<T>& amat, bool computeDet)
  {
    const int n=amat.rows();
    const int lda=amat.cols();
    if(Lwork<lda) reset(amat,lda);
    Xgetrf(n,n,amat.data(),lda,m_pivot.data());
    if(computeDet)
    {
      LogDet=computeLogDet(amat.data(),n,lda,m_pivot.data(),Phase);
    }
    Xgetri(n, amat.data(),lda,m_pivot.data(),m_work.data(),Lwork);
  }

  /** (re)allocate pivot and work buffers; uses the standard LAPACK
   * workspace-size query (lwork = -1 returns the optimal size in work[0]) */
  inline void reset(Matrix<T>& amat, const int lda)
  {
    m_pivot.resize(lda);
    Lwork=-1;
    T tmp;
    real_type lw;
    Xgetri(lda, amat.data(),lda,m_pivot.data(),&tmp,Lwork);
    convert(tmp,lw);
    Lwork=static_cast<int>(lw);
    m_work.resize(Lwork);
  }
};
}

#endif // QMCPLUSPLUS_DIRAC_MATRIX_H
GB_binop__lor_fp32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE: every function body in this file comes from a shared template that is
// textually #include'd; only the macros below (types and the LOR operator on
// fp32 values) are specific to this instantiation.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__lor_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_08__lor_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__lor_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_04__lor_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__lor_fp32)
// A*D function (colscale):         GB (_AxD__lor_fp32)
// D*A function (rowscale):         GB (_DxB__lor_fp32)
// C+=B function (dense accum):     GB (_Cdense_accumB__lor_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__lor_fp32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__lor_fp32)
// C=scalar+B                       GB (_bind1st__lor_fp32)
// C=scalar+B'                      GB (_bind1st_tran__lor_fp32)
// C=A+scalar                       GB (_bind2nd__lor_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__lor_fp32)

// C type:   float
// A type:   float
// A pattern? 0
// B type:   float
// B pattern? 0

// BinaryOp: cij = ((aij != 0) || (bij != 0))

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = ((x != 0) || (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOR || GxB_NO_FP32 || GxB_NO_LOR_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__lor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__lor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lor_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lor_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    float alpha_scalar ;
    float beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((float *) alpha_scalar_in)) ;
        beta_scalar  = (*((float *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__lor_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__lor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lor_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lor_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float   x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) || (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__lor_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float   y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) || (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = GBX (Ax, pA, false) ;           \
    Cx [pC] = ((x != 0) || (aij != 0)) ;        \
}

GrB_Info GB (_bind1st_tran__lor_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for anything that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = GBX (Ax, pA, false) ;           \
    Cx [pC] = ((aij != 0) || (y != 0)) ;        \
}

GrB_Info GB (_bind2nd_tran__lor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__bshift_uint16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bshift_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_08__bshift_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_02__bshift_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_04__bshift_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bshift_uint16)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bshift_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__bshift_uint16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bshift_uint16)
// C=scalar+B                       GB (_bind1st__bshift_uint16)
// C=scalar+B'                      GB (_bind1st_tran__bshift_uint16)
// C=A+scalar                       GB (_bind2nd__bshift_uint16)
// C=A'+scalar                      GB (_bind2nd_tran__bshift_uint16)

// C type:   uint16_t
// A type:   uint16_t
// A pattern? 0
// B type:   int8_t
// B pattern? 0

// BinaryOp: cij = GB_bitshift_uint16 (aij, bij)
// (the value being shifted is uint16_t; the shift count is int8_t)

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    0

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_bitshift_uint16 (x, y) ;

// true if the binop must be flipped (bitshift is not commutative)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BSHIFT || GxB_NO_UINT16 || GxB_NO_BSHIFT_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (bitshift is none of these, so this kernel is compiled out.)
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bshift_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bshift_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bshift_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable generator boilerplate; the block above
    // already returned
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// (compiled out: no colscale kernel is generated for this operator)
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// (compiled out: no rowscale kernel is generated for this operator)
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// Element-wise add (set union of patterns) with cij = GB_bitshift_uint16.
// alpha/beta scalars are only read when is_eWiseUnion is true.
GrB_Info GB (_AaddB__bshift_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint16_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bshift_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// GB_BINOP_FLIP is 1 here, so both flipped and un-flipped instantiations of
// the template are compiled and selected at run time via flipxy.
GrB_Info GB (_AemultB_02__bshift_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bshift_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bshift_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = GB_bitshift_uint16 (x, Bx [p]) for entries present in bitmap Bb.
GrB_Info GB (_bind1st__bshift_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t  x = (*((uint16_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_bitshift_uint16 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = GB_bitshift_uint16 (Ax [p], y) for entries present in bitmap Ab.
GrB_Info GB (_bind2nd__bshift_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    int8_t  y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_bitshift_uint16 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_bitshift_uint16 (x, aij) ;     \
}

GrB_Info GB (_bind1st_tran__bshift_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE to the type of A for the rest of this file
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = GB_bitshift_uint16 (aij, y) ;     \
}

GrB_Info GB (_bind2nd_tran__bshift_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

// closes the #ifndef GBCOMPACT guard at the top of this file
#endif
convolution_1x1_pack8to4_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Convert the fp32 weights of a 1x1 convolution into the interleaved fp16
// layout consumed by conv1x1s1_sgemm_pack8to4_fp16sa_neon below.
// Output channels are grouped 8 at a time (one kernel_tm channel per group),
// with a trailing group of 4; input channels are consumed 8 at a time.
// NOTE(review): inch is assumed to be a multiple of 8 and outch a multiple
// of 4 (pack8 input / pack4 output) — the loops silently drop remainders
// otherwise; confirm against the caller.
static void conv1x1s1_sgemm_transform_kernel_pack8to4_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8to4, int inch, int outch)
{
    // interleave
    // src = inch-outch
    // dst = 4b-8a-inch/8a-outch/4
    kernel_tm_pack8to4.create(4 * 8, inch / 8, outch / 8 + (outch % 8) / 4, (size_t)2u * 2, 2);

    int p = 0;
    for (; p + 7 < outch; p += 8)
    {
        // eight consecutive output channels are interleaved into one group
        const float* k0 = (const float*)kernel + (p + 0) * inch;
        const float* k1 = (const float*)kernel + (p + 1) * inch;
        const float* k2 = (const float*)kernel + (p + 2) * inch;
        const float* k3 = (const float*)kernel + (p + 3) * inch;
        const float* k4 = (const float*)kernel + (p + 4) * inch;
        const float* k5 = (const float*)kernel + (p + 5) * inch;
        const float* k6 = (const float*)kernel + (p + 6) * inch;
        const float* k7 = (const float*)kernel + (p + 7) * inch;

        __fp16* g0 = kernel_tm_pack8to4.channel(p / 8);

        for (int q = 0; q + 7 < inch; q += 8)
        {
            for (int i = 0; i < 8; i++)
            {
                // one fp16 weight per output channel, cast from fp32
                g0[0] = (__fp16)k0[i];
                g0[1] = (__fp16)k1[i];
                g0[2] = (__fp16)k2[i];
                g0[3] = (__fp16)k3[i];
                g0[4] = (__fp16)k4[i];
                g0[5] = (__fp16)k5[i];
                g0[6] = (__fp16)k6[i];
                g0[7] = (__fp16)k7[i];
                g0 += 8;
            }

            k0 += 8;
            k1 += 8;
            k2 += 8;
            k3 += 8;
            k4 += 8;
            k5 += 8;
            k6 += 8;
            k7 += 8;
        }
    }
    for (; p + 3 < outch; p += 4)
    {
        // remaining group of four output channels
        const float* k0 = (const float*)kernel + (p + 0) * inch;
        const float* k1 = (const float*)kernel + (p + 1) * inch;
        const float* k2 = (const float*)kernel + (p + 2) * inch;
        const float* k3 = (const float*)kernel + (p + 3) * inch;

        __fp16* g0 = kernel_tm_pack8to4.channel(p / 8 + (p % 8) / 4);

        for (int q = 0; q + 7 < inch; q += 8)
        {
            for (int i = 0; i < 8; i++)
            {
                g0[0] = (__fp16)k0[i];
                g0[1] = (__fp16)k1[i];
                g0[2] = (__fp16)k2[i];
                g0[3] = (__fp16)k3[i];
                g0 += 4;
            }

            k0 += 8;
            k1 += 8;
            k2 += 8;
            k3 += 8;
        }
    }
}

// 1x1 stride-1 convolution, pack8 fp16 input -> pack4 fp16 output, computed
// as a small GEMM: pixels are first re-tiled into blocks of 8/4/1, then the
// inner product over input channels runs in AArch64 inline assembly
// (fp16 fmla).  `kernel` must already be in the layout produced by
// conv1x1s1_sgemm_transform_kernel_pack8to4_fp16sa_neon; `_bias` may be
// empty (a zero vector is substituted).
static void conv1x1s1_sgemm_pack8to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outch = top_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    const int size = w * h;

    const __fp16* bias = _bias;

    // interleave
    // tmp holds the input pixels re-tiled into column blocks of 8, 4 and 1
    Mat tmp;
    if (size >= 8)
        tmp.create(8, inch, size / 8 + (size % 8) / 4 + size % 4, elemsize, elempack, opt.workspace_allocator);
    else if (size >= 4)
        tmp.create(4, inch, size / 4 + size % 4, elemsize, elempack, opt.workspace_allocator);
    else // if (size >= 1)
        tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator);
    {
        int nn_size;
        int remain_size_start = 0;

        nn_size = (size - remain_size_start) >> 3;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 8;

            const __fp16* img0 = bottom_blob.channel(0);
            img0 += i * 8;

            __fp16* tmpptr = tmp.channel(i / 8);

            for (int q = 0; q < inch; q++)
            {
                // transpose 8x8
                asm volatile(
                    "prfm pldl1keep, [%0, #512] \n"
                    "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
                    "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n"
                    "sub %0, %0, #64 \n"
                    "uzp1 v16.8h, v0.8h, v4.8h \n"
                    "uzp2 v20.8h, v0.8h, v4.8h \n"
                    "uzp1 v17.8h, v1.8h, v5.8h \n"
                    "uzp2 v21.8h, v1.8h, v5.8h \n"
                    "uzp1 v18.8h, v2.8h, v6.8h \n"
                    "uzp2 v22.8h, v2.8h, v6.8h \n"
                    "uzp1 v19.8h, v3.8h, v7.8h \n"
                    "uzp2 v23.8h, v3.8h, v7.8h \n"
                    "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
                    "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
                    : "=r"(img0),  // %0
                    "=r"(tmpptr) // %1
                    : "0"(img0),
                    "1"(tmpptr)
                    : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");

                img0 += bottom_blob.cstep * 8;
            }
        }

        remain_size_start += nn_size << 3;
        nn_size = (size - remain_size_start) >> 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 4;

            const __fp16* img0 = bottom_blob.channel(0);
            img0 += i * 8;

            __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);

            for (int q = 0; q < inch; q++)
            {
                // transpose 8x4
                asm volatile(
                    "prfm pldl1keep, [%0, #256] \n"
                    "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
                    "st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
                    : "=r"(img0),  // %0
                    "=r"(tmpptr) // %1
                    : "0"(img0),
                    "1"(tmpptr)
                    : "memory", "v0", "v1", "v2", "v3");

                img0 += bottom_blob.cstep * 8;
            }
        }

        remain_size_start += nn_size << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            const __fp16* img0 = bottom_blob.channel(0);
            img0 += i * 8;

            __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);

            for (int q = 0; q < inch; q++)
            {
                // single pixel: copy one pack8 element straight through
                asm volatile(
                    "prfm pldl1keep, [%0, #128] \n"
                    "ld1 {v0.8h}, [%0] \n"
                    "st1 {v0.8h}, [%1], #16 \n"
                    : "=r"(img0),  // %0
                    "=r"(tmpptr) // %1
                    : "0"(img0),
                    "1"(tmpptr)
                    : "memory", "v0");

                img0 += bottom_blob.cstep * 8;
            }
        }
    }

    int nn_outch = 0;
    int remain_outch_start = 0;

    nn_outch = outch >> 1;

    // two pack4 output channels per iteration (one 8-wide accumulator pair)
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 2;

        __fp16* outptr0 = top_blob.channel(p);
        __fp16* outptr1 = top_blob.channel(p + 1);

        const __fp16 zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
        const __fp16* biasptr = bias ? bias + p * 4 : zeros;
        float16x8_t _bias0 = vld1q_f16(biasptr);

        int i = 0;
        for (; i + 7 < size; i += 8)
        {
            __fp16* tmpptr = tmp.channel(i / 8);
            const __fp16* kptr = kernel.channel(p / 2);

            int nn = inch; // inch always > 0

            // 8 pixels x 8 output channels, accumulating over inch in
            // blocks of 8 input channels per loop iteration
            asm volatile(
                "mov v24.16b, %10.16b \n"
                "mov v25.16b, %10.16b \n"
                "mov v26.16b, %10.16b \n"
                "mov v27.16b, %10.16b \n"
                "mov v28.16b, %10.16b \n"
                "mov v29.16b, %10.16b \n"
                "mov v30.16b, %10.16b \n"
                "mov v31.16b, %10.16b \n"
                "0: \n"
                "prfm pldl1keep, [%4, #512] \n"
                "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                "prfm pldl1keep, [%3, #512] \n"
                "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n"
                "fmla v24.8h, v16.8h, v0.h[0] \n"
                "fmla v25.8h, v16.8h, v0.h[1] \n"
                "fmla v26.8h, v16.8h, v0.h[2] \n"
                "fmla v27.8h, v16.8h, v0.h[3] \n"
                "fmla v28.8h, v16.8h, v0.h[4] \n"
                "fmla v29.8h, v16.8h, v0.h[5] \n"
                "fmla v30.8h, v16.8h, v0.h[6] \n"
                "fmla v31.8h, v16.8h, v0.h[7] \n"
                "fmla v24.8h, v17.8h, v1.h[0] \n"
                "fmla v25.8h, v17.8h, v1.h[1] \n"
                "fmla v26.8h, v17.8h, v1.h[2] \n"
                "fmla v27.8h, v17.8h, v1.h[3] \n"
                "fmla v28.8h, v17.8h, v1.h[4] \n"
                "fmla v29.8h, v17.8h, v1.h[5] \n"
                "fmla v30.8h, v17.8h, v1.h[6] \n"
                "fmla v31.8h, v17.8h, v1.h[7] \n"
                "prfm pldl1keep, [%4, #512] \n"
                "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                "fmla v24.8h, v18.8h, v2.h[0] \n"
                "fmla v25.8h, v18.8h, v2.h[1] \n"
                "fmla v26.8h, v18.8h, v2.h[2] \n"
                "fmla v27.8h, v18.8h, v2.h[3] \n"
                "fmla v28.8h, v18.8h, v2.h[4] \n"
                "fmla v29.8h, v18.8h, v2.h[5] \n"
                "fmla v30.8h, v18.8h, v2.h[6] \n"
                "fmla v31.8h, v18.8h, v2.h[7] \n"
                "prfm pldl1keep, [%3, #512] \n"
                "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n"
                "fmla v24.8h, v19.8h, v3.h[0] \n"
                "fmla v25.8h, v19.8h, v3.h[1] \n"
                "fmla v26.8h, v19.8h, v3.h[2] \n"
                "fmla v27.8h, v19.8h, v3.h[3] \n"
                "fmla v28.8h, v19.8h, v3.h[4] \n"
                "fmla v29.8h, v19.8h, v3.h[5] \n"
                "fmla v30.8h, v19.8h, v3.h[6] \n"
                "fmla v31.8h, v19.8h, v3.h[7] \n"
                "fmla v24.8h, v20.8h, v4.h[0] \n"
                "fmla v25.8h, v20.8h, v4.h[1] \n"
                "fmla v26.8h, v20.8h, v4.h[2] \n"
                "fmla v27.8h, v20.8h, v4.h[3] \n"
                "fmla v28.8h, v20.8h, v4.h[4] \n"
                "fmla v29.8h, v20.8h, v4.h[5] \n"
                "fmla v30.8h, v20.8h, v4.h[6] \n"
                "fmla v31.8h, v20.8h, v4.h[7] \n"
                "fmla v24.8h, v21.8h, v5.h[0] \n"
                "fmla v25.8h, v21.8h, v5.h[1] \n"
                "fmla v26.8h, v21.8h, v5.h[2] \n"
                "fmla v27.8h, v21.8h, v5.h[3] \n"
                "fmla v28.8h, v21.8h, v5.h[4] \n"
                "fmla v29.8h, v21.8h, v5.h[5] \n"
                "fmla v30.8h, v21.8h, v5.h[6] \n"
                "fmla v31.8h, v21.8h, v5.h[7] \n"
                "fmla v24.8h, v22.8h, v6.h[0] \n"
                "fmla v25.8h, v22.8h, v6.h[1] \n"
                "fmla v26.8h, v22.8h, v6.h[2] \n"
                "fmla v27.8h, v22.8h, v6.h[3] \n"
                "fmla v28.8h, v22.8h, v6.h[4] \n"
                "fmla v29.8h, v22.8h, v6.h[5] \n"
                "fmla v30.8h, v22.8h, v6.h[6] \n"
                "fmla v31.8h, v22.8h, v6.h[7] \n"
                "subs %w0, %w0, #1 \n"
                "fmla v24.8h, v23.8h, v7.h[0] \n"
                "fmla v25.8h, v23.8h, v7.h[1] \n"
                "fmla v26.8h, v23.8h, v7.h[2] \n"
                "fmla v27.8h, v23.8h, v7.h[3] \n"
                "fmla v28.8h, v23.8h, v7.h[4] \n"
                "fmla v29.8h, v23.8h, v7.h[5] \n"
                "fmla v30.8h, v23.8h, v7.h[6] \n"
                "fmla v31.8h, v23.8h, v7.h[7] \n"
                "bne 0b \n"
                "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
                "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n"
                "ext v24.16b, v24.16b, v24.16b, #8 \n"
                "ext v25.16b, v25.16b, v25.16b, #8 \n"
                "ext v26.16b, v26.16b, v26.16b, #8 \n"
                "ext v27.16b, v27.16b, v27.16b, #8 \n"
                "ext v28.16b, v28.16b, v28.16b, #8 \n"
                "ext v29.16b, v29.16b, v29.16b, #8 \n"
                "ext v30.16b, v30.16b, v30.16b, #8 \n"
                "ext v31.16b, v31.16b, v31.16b, #8 \n"
                "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n"
                "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n"
                : "=r"(nn),      // %0
                "=r"(outptr0), // %1
                "=r"(outptr1), // %2
                "=r"(tmpptr),  // %3
                "=r"(kptr)     // %4
                : "0"(nn),
                "1"(outptr0),
                "2"(outptr1),
                "3"(tmpptr),
                "4"(kptr),
                "w"(_bias0) // %10
                : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
        }
        for (; i + 3 < size; i += 4)
        {
            __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
            const __fp16* kptr = kernel.channel(p / 2);

            int nn = inch; // inch always > 0

            // 4 pixels x 8 output channels
            asm volatile(
                "mov v24.16b, %10.16b \n"
                "mov v25.16b, %10.16b \n"
                "mov v26.16b, %10.16b \n"
                "mov v27.16b, %10.16b \n"
                "0: \n"
                "prfm pldl1keep, [%4, #512] \n"
                "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                "prfm pldl1keep, [%3, #512] \n"
                "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n"
                "fmla v24.8h, v16.8h, v0.h[0] \n"
                "fmla v25.8h, v16.8h, v0.h[1] \n"
                "fmla v26.8h, v16.8h, v0.h[2] \n"
                "fmla v27.8h, v16.8h, v0.h[3] \n"
                "fmla v24.8h, v17.8h, v0.h[4] \n"
                "fmla v25.8h, v17.8h, v0.h[5] \n"
                "fmla v26.8h, v17.8h, v0.h[6] \n"
                "fmla v27.8h, v17.8h, v0.h[7] \n"
                "prfm pldl1keep, [%4, #512] \n"
                "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                "fmla v24.8h, v18.8h, v1.h[0] \n"
                "fmla v25.8h, v18.8h, v1.h[1] \n"
                "fmla v26.8h, v18.8h, v1.h[2] \n"
                "fmla v27.8h, v18.8h, v1.h[3] \n"
                "fmla v24.8h, v19.8h, v1.h[4] \n"
                "fmla v25.8h, v19.8h, v1.h[5] \n"
                "fmla v26.8h, v19.8h, v1.h[6] \n"
                "fmla v27.8h, v19.8h, v1.h[7] \n"
                "fmla v24.8h, v20.8h, v2.h[0] \n"
                "fmla v25.8h, v20.8h, v2.h[1] \n"
                "fmla v26.8h, v20.8h, v2.h[2] \n"
                "fmla v27.8h, v20.8h, v2.h[3] \n"
                "fmla v24.8h, v21.8h, v2.h[4] \n"
                "fmla v25.8h, v21.8h, v2.h[5] \n"
                "fmla v26.8h, v21.8h, v2.h[6] \n"
                "fmla v27.8h, v21.8h, v2.h[7] \n"
                "subs %w0, %w0, #1 \n"
                "fmla v24.8h, v22.8h, v3.h[0] \n"
                "fmla v25.8h, v22.8h, v3.h[1] \n"
                "fmla v26.8h, v22.8h, v3.h[2] \n"
                "fmla v27.8h, v22.8h, v3.h[3] \n"
                "fmla v24.8h, v23.8h, v3.h[4] \n"
                "fmla v25.8h, v23.8h, v3.h[5] \n"
                "fmla v26.8h, v23.8h, v3.h[6] \n"
                "fmla v27.8h, v23.8h, v3.h[7] \n"
                "bne 0b \n"
                "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
                "ext v24.16b, v24.16b, v24.16b, #8 \n"
                "ext v25.16b, v25.16b, v25.16b, #8 \n"
                "ext v26.16b, v26.16b, v26.16b, #8 \n"
                "ext v27.16b, v27.16b, v27.16b, #8 \n"
                "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n"
                : "=r"(nn),      // %0
                "=r"(outptr0), // %1
                "=r"(outptr1), // %2
                "=r"(tmpptr),  // %3
                "=r"(kptr)     // %4
                : "0"(nn),
                "1"(outptr0),
                "2"(outptr1),
                "3"(tmpptr),
                "4"(kptr),
                "w"(_bias0) // %10
                : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
        }
        for (; i < size; i++)
        {
            // single-pixel tail using fp16 intrinsics
            __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
            const __fp16* kptr = kernel.channel(p / 2);

            float16x8_t _sum0 = _bias0;

            for (int q = 0; q < inch; q++)
            {
                float16x8_t _r0 = vld1q_f16(tmpptr);

                float16x8_t _k0 = vld1q_f16(kptr);
                float16x8_t _k1 = vld1q_f16(kptr + 8);
                float16x8_t _k2 = vld1q_f16(kptr + 16);
                float16x8_t _k3 = vld1q_f16(kptr + 24);
                float16x8_t _k4 = vld1q_f16(kptr + 32);
                float16x8_t _k5 = vld1q_f16(kptr + 40);
                float16x8_t _k6 = vld1q_f16(kptr + 48);
                float16x8_t _k7 = vld1q_f16(kptr + 56);

                _sum0 = vfmaq_laneq_f16(_sum0, _k0, _r0, 0);
                _sum0 = vfmaq_laneq_f16(_sum0, _k1, _r0, 1);
                _sum0 = vfmaq_laneq_f16(_sum0, _k2, _r0, 2);
                _sum0 = vfmaq_laneq_f16(_sum0, _k3, _r0, 3);
                _sum0 = vfmaq_laneq_f16(_sum0, _k4, _r0, 4);
                _sum0 = vfmaq_laneq_f16(_sum0, _k5, _r0, 5);
                _sum0 = vfmaq_laneq_f16(_sum0, _k6, _r0, 6);
                _sum0 = vfmaq_laneq_f16(_sum0, _k7, _r0, 7);

                kptr += 64;
                tmpptr += 8;
            }

            // low half -> channel p, high half -> channel p+1
            vst1_f16(outptr0, vget_low_f16(_sum0));
            vst1_f16(outptr1, vget_high_f16(_sum0));

            outptr0 += 4;
            outptr1 += 4;
        }
    }

    remain_outch_start += nn_outch << 1;

    // remaining single pack4 output channel
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = remain_outch_start; p < outch; p++)
    {
        __fp16* outptr0 = top_blob.channel(p);

        const __fp16 zeros[4] = {0.f, 0.f, 0.f, 0.f};
        const __fp16* biasptr = bias ? bias + p * 4 : zeros;
        float16x4_t _bias0 = vld1_f16(biasptr);

        int i = 0;
        for (; i + 7 < size; i += 8)
        {
            __fp16* tmpptr = tmp.channel(i / 8);
            const __fp16* kptr = kernel.channel(p / 2 + p % 2);

            int nn = inch; // inch always > 0

            // 8 pixels x 4 output channels
            asm volatile(
                "mov v24.16b, %8.16b \n"
                "mov v25.16b, %8.16b \n"
                "mov v26.16b, %8.16b \n"
                "mov v27.16b, %8.16b \n"
                "mov v28.16b, %8.16b \n"
                "mov v29.16b, %8.16b \n"
                "mov v30.16b, %8.16b \n"
                "mov v31.16b, %8.16b \n"
                "0: \n"
                "prfm pldl1keep, [%3, #256] \n"
                "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n"
                "prfm pldl1keep, [%2, #512] \n"
                "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n"
                "fmla v24.4h, v16.4h, v0.h[0] \n"
                "fmla v25.4h, v16.4h, v0.h[1] \n"
                "fmla v26.4h, v16.4h, v0.h[2] \n"
                "fmla v27.4h, v16.4h, v0.h[3] \n"
                "fmla v28.4h, v16.4h, v0.h[4] \n"
                "fmla v29.4h, v16.4h, v0.h[5] \n"
                "fmla v30.4h, v16.4h, v0.h[6] \n"
                "fmla v31.4h, v16.4h, v0.h[7] \n"
                "fmla v24.4h, v17.4h, v1.h[0] \n"
                "fmla v25.4h, v17.4h, v1.h[1] \n"
                "fmla v26.4h, v17.4h, v1.h[2] \n"
                "fmla v27.4h, v17.4h, v1.h[3] \n"
                "fmla v28.4h, v17.4h, v1.h[4] \n"
                "fmla v29.4h, v17.4h, v1.h[5] \n"
                "fmla v30.4h, v17.4h, v1.h[6] \n"
                "fmla v31.4h, v17.4h, v1.h[7] \n"
                "prfm pldl1keep, [%3, #256] \n"
                "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n"
                "fmla v24.4h, v18.4h, v2.h[0] \n"
                "fmla v25.4h, v18.4h, v2.h[1] \n"
                "fmla v26.4h, v18.4h, v2.h[2] \n"
                "fmla v27.4h, v18.4h, v2.h[3] \n"
                "fmla v28.4h, v18.4h, v2.h[4] \n"
                "fmla v29.4h, v18.4h, v2.h[5] \n"
                "fmla v30.4h, v18.4h, v2.h[6] \n"
                "fmla v31.4h, v18.4h, v2.h[7] \n"
                "prfm pldl1keep, [%2, #512] \n"
                "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n"
                "fmla v24.4h, v19.4h, v3.h[0] \n"
                "fmla v25.4h, v19.4h, v3.h[1] \n"
                "fmla v26.4h, v19.4h, v3.h[2] \n"
                "fmla v27.4h, v19.4h, v3.h[3] \n"
                "fmla v28.4h, v19.4h, v3.h[4] \n"
                "fmla v29.4h, v19.4h, v3.h[5] \n"
                "fmla v30.4h, v19.4h, v3.h[6] \n"
                "fmla v31.4h, v19.4h, v3.h[7] \n"
                "fmla v24.4h, v20.4h, v4.h[0] \n"
                "fmla v25.4h, v20.4h, v4.h[1] \n"
                "fmla v26.4h, v20.4h, v4.h[2] \n"
                "fmla v27.4h, v20.4h, v4.h[3] \n"
                "fmla v28.4h, v20.4h, v4.h[4] \n"
                "fmla v29.4h, v20.4h, v4.h[5] \n"
                "fmla v30.4h, v20.4h, v4.h[6] \n"
                "fmla v31.4h, v20.4h, v4.h[7] \n"
                "fmla v24.4h, v21.4h, v5.h[0] \n"
                "fmla v25.4h, v21.4h, v5.h[1] \n"
                "fmla v26.4h, v21.4h, v5.h[2] \n"
                "fmla v27.4h, v21.4h, v5.h[3] \n"
                "fmla v28.4h, v21.4h, v5.h[4] \n"
                "fmla v29.4h, v21.4h, v5.h[5] \n"
                "fmla v30.4h, v21.4h, v5.h[6] \n"
                "fmla v31.4h, v21.4h, v5.h[7] \n"
                "fmla v24.4h, v22.4h, v6.h[0] \n"
                "fmla v25.4h, v22.4h, v6.h[1] \n"
                "fmla v26.4h, v22.4h, v6.h[2] \n"
                "fmla v27.4h, v22.4h, v6.h[3] \n"
                "fmla v28.4h, v22.4h, v6.h[4] \n"
                "fmla v29.4h, v22.4h, v6.h[5] \n"
                "fmla v30.4h, v22.4h, v6.h[6] \n"
                "fmla v31.4h, v22.4h, v6.h[7] \n"
                "subs %w0, %w0, #1 \n"
                "fmla v24.4h, v23.4h, v7.h[0] \n"
                "fmla v25.4h, v23.4h, v7.h[1] \n"
                "fmla v26.4h, v23.4h, v7.h[2] \n"
                "fmla v27.4h, v23.4h, v7.h[3] \n"
                "fmla v28.4h, v23.4h, v7.h[4] \n"
                "fmla v29.4h, v23.4h, v7.h[5] \n"
                "fmla v30.4h, v23.4h, v7.h[6] \n"
                "fmla v31.4h, v23.4h, v7.h[7] \n"
                "bne 0b \n"
                "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
                "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n"
                : "=r"(nn),      // %0
                "=r"(outptr0), // %1
                "=r"(tmpptr),  // %2
                "=r"(kptr)     // %3
                : "0"(nn),
                "1"(outptr0),
                "2"(tmpptr),
                "3"(kptr),
                "w"(_bias0) // %8
                : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
        }
        for (; i + 3 < size; i += 4)
        {
            __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
            const __fp16* kptr = kernel.channel(p / 2 + p % 2);

            int nn = inch; // inch always > 0

            // 4 pixels x 4 output channels
            asm volatile(
                "mov v24.16b, %8.16b \n"
                "mov v25.16b, %8.16b \n"
                "mov v26.16b, %8.16b \n"
                "mov v27.16b, %8.16b \n"
                "0: \n"
                "prfm pldl1keep, [%3, #256] \n"
                "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n"
                "prfm pldl1keep, [%2, #512] \n"
                "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n"
                "fmla v24.4h, v16.4h, v0.h[0] \n"
                "fmla v25.4h, v16.4h, v0.h[1] \n"
                "fmla v26.4h, v16.4h, v0.h[2] \n"
                "fmla v27.4h, v16.4h, v0.h[3] \n"
                "fmla v24.4h, v17.4h, v0.h[4] \n"
                "fmla v25.4h, v17.4h, v0.h[5] \n"
                "fmla v26.4h, v17.4h, v0.h[6] \n"
                "fmla v27.4h, v17.4h, v0.h[7] \n"
                "prfm pldl1keep, [%3, #256] \n"
                "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n"
                "fmla v24.4h, v18.4h, v1.h[0] \n"
                "fmla v25.4h, v18.4h, v1.h[1] \n"
                "fmla v26.4h, v18.4h, v1.h[2] \n"
                "fmla v27.4h, v18.4h, v1.h[3] \n"
                "fmla v24.4h, v19.4h, v1.h[4] \n"
                "fmla v25.4h, v19.4h, v1.h[5] \n"
                "fmla v26.4h, v19.4h, v1.h[6] \n"
                "fmla v27.4h, v19.4h, v1.h[7] \n"
                "fmla v24.4h, v20.4h, v2.h[0] \n"
                "fmla v25.4h, v20.4h, v2.h[1] \n"
                "fmla v26.4h, v20.4h, v2.h[2] \n"
                "fmla v27.4h, v20.4h, v2.h[3] \n"
                "fmla v24.4h, v21.4h, v2.h[4] \n"
                "fmla v25.4h, v21.4h, v2.h[5] \n"
                "fmla v26.4h, v21.4h, v2.h[6] \n"
                "fmla v27.4h, v21.4h, v2.h[7] \n"
                "subs %w0, %w0, #1 \n"
                "fmla v24.4h, v22.4h, v3.h[0] \n"
                "fmla v25.4h, v22.4h, v3.h[1] \n"
                "fmla v26.4h, v22.4h, v3.h[2] \n"
                "fmla v27.4h, v22.4h, v3.h[3] \n"
                "fmla v24.4h, v23.4h, v3.h[4] \n"
                "fmla v25.4h, v23.4h, v3.h[5] \n"
                "fmla v26.4h, v23.4h, v3.h[6] \n"
                "fmla v27.4h, v23.4h, v3.h[7] \n"
                "bne 0b \n"
                "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
                : "=r"(nn),      // %0
                "=r"(outptr0), // %1
                "=r"(tmpptr),  // %2
                "=r"(kptr)     // %3
                : "0"(nn),
                "1"(outptr0),
                "2"(tmpptr),
                "3"(kptr),
                "w"(_bias0) // %8
                : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
        }
        for (; i < size; i++)
        {
            // single-pixel tail using fp16 intrinsics
            __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
            const __fp16* kptr = kernel.channel(p / 2 + p % 2);

            float16x4_t _sum0 = _bias0;

            for (int q = 0; q < inch; q++)
            {
                float16x8_t _r0 = vld1q_f16(tmpptr);

                float16x4_t _k0 = vld1_f16(kptr);
                float16x4_t _k1 = vld1_f16(kptr + 4);
                float16x4_t _k2 = vld1_f16(kptr + 8);
                float16x4_t _k3 = vld1_f16(kptr + 12);
                float16x4_t _k4 = vld1_f16(kptr + 16);
                float16x4_t _k5 = vld1_f16(kptr + 20);
                float16x4_t _k6 = vld1_f16(kptr + 24);
                float16x4_t _k7 = vld1_f16(kptr + 28);

                _sum0 = vfma_laneq_f16(_sum0, _k0, _r0, 0);
                _sum0 = vfma_laneq_f16(_sum0, _k1, _r0, 1);
                _sum0 = vfma_laneq_f16(_sum0, _k2, _r0, 2);
                _sum0 = vfma_laneq_f16(_sum0, _k3, _r0, 3);
                _sum0 = vfma_laneq_f16(_sum0, _k4, _r0, 4);
                _sum0 = vfma_laneq_f16(_sum0, _k5, _r0, 5);
                _sum0 = vfma_laneq_f16(_sum0, _k6, _r0, 6);
                _sum0 = vfma_laneq_f16(_sum0, _k7, _r0, 7);

                kptr += 32;
                tmpptr += 8;
            }

            vst1_f16(outptr0, _sum0);

            outptr0 += 4;
        }
    }

    //     // NOTE sgemm
    //     for (; p<outch; p++)
    //     {
    //         Mat out0 = top_blob.channel(p);
    //
    //         const float bias0 = bias ? bias[p] : 0.f;
    //
    //         __fp16* outptr0 = out0;
    //
    //         for (int i=0; i<size; i++)
    //         {
    //             float sum = bias0;
    //
    //             const __fp16* kptr = _kernel.channel(p);
    //
    //             for (int q=0; q<inch; q++)
    //             {
    //                 const __fp16* img0 = bottom_blob.channel(q);
    //
    //                 sum += img0[i] * kptr[0];
    //                 kptr ++;
    //             }
    //
    //             outptr0[i] = sum;
    //         }
    //     }
}

// 1x1 stride-2 convolution: subsample the input in-plane by a factor of two
// into a temporary blob, then reuse the stride-1 sgemm kernel above.
static void conv1x1s2_pack8to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // bytes to skip one full input row plus the unused right-hand columns
    // (8 fp16 lanes per pixel)
    const int tailstep = (w - 2 * outw + w) * 8;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const __fp16* r0 = bottom_blob.channel(p);
        __fp16* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            int j = 0;
            for (; j + 3 < outw; j += 4)
            {
                // copy every other pack8 pixel (stride 2 -> 16 halfs apart)
                float16x8_t _v0 = vld1q_f16(r0);
                float16x8_t _v1 = vld1q_f16(r0 + 16);
                float16x8_t _v2 = vld1q_f16(r0 + 32);
                float16x8_t _v3 = vld1q_f16(r0 + 48);
                vst1q_f16(outptr, _v0);
                vst1q_f16(outptr + 8, _v1);
                vst1q_f16(outptr + 16, _v2);
                vst1q_f16(outptr + 24, _v3);

                r0 += 64;
                outptr += 32;
            }
            for (; j + 1 < outw; j += 2)
            {
                float16x8_t _v0 = vld1q_f16(r0);
                float16x8_t _v1 = vld1q_f16(r0 + 16);
                vst1q_f16(outptr, _v0);
                vst1q_f16(outptr + 8, _v1);

                r0 += 32;
                outptr += 16;
            }
            for (; j < outw; j++)
            {
                float16x8_t _v = vld1q_f16(r0);
                vst1q_f16(outptr, _v);

                r0 += 16;
                outptr += 8;
            }

            r0 += tailstep;
        }
    }

    conv1x1s1_sgemm_pack8to4_fp16sa_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
GB_binop__rminus_fc32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__rminus_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_08__rminus_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_02__rminus_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_04__rminus_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__rminus_fc32)
// A*D function (colscale):         GB (_AxD__rminus_fc32)
// D*A function (rowscale):         GB (_DxB__rminus_fc32)
// C+=B function (dense accum):     GB (_Cdense_accumB__rminus_fc32)
// C+=b function (dense accum):     GB (_Cdense_accumb__rminus_fc32)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__rminus_fc32)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__rminus_fc32)
// C=scalar+B                       GB (_bind1st__rminus_fc32)
// C=scalar+B'                      GB (_bind1st_tran__rminus_fc32)
// C=A+scalar                       GB (_bind2nd__rminus_fc32)
// C=A'+scalar                      GB (_bind2nd_tran__rminus_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// A pattern? 0
// B type:   GxB_FC32_t
// B pattern? 0

// BinaryOp: cij = GB_FC32_minus (bij, aij)
// RMINUS is "reverse minus": z = f(x,y) = y - x, i.e. minus with the
// operands swapped, hence GB_FC32_minus (bij, aij) rather than (aij, bij).

// The macros below configure the generic template files (#include'd into the
// function bodies) for this particular operator/type combination.

#define GB_ATYPE \
    GxB_FC32_t

#define GB_BTYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    GxB_FC32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    GxB_FC32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator (note the swapped operands: z = y - x for RMINUS)
#define GB_BINOP(z,x,y,i,j) \
    z = GB_FC32_minus (y, x) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (each function below then compiles to "return (GrB_NO_VALUE)")
#define GB_DISABLE \
    (GxB_NO_RMINUS || GxB_NO_FC32 || GxB_NO_RMINUS_FC32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rminus_fc32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__rminus_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read when is_eWiseUnion is true
    GxB_FC32_t alpha_scalar ;
    GxB_FC32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((GxB_FC32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__rminus_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__rminus_fc32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__rminus_fc32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb (GBB is true if dense)
        if (!GBB (Bb, p)) continue ;
        GxB_FC32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_FC32_minus (bij, x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__rminus_fc32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab (GBB is true if dense)
        if (!GBB (Ab, p)) continue ;
        GxB_FC32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_FC32_minus (y, aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    GxB_FC32_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_FC32_minus (aij, x) ;              \
}

GrB_Info GB (_bind1st_tran__rminus_fc32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    GxB_FC32_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_FC32_minus (y, aij) ;              \
}

GrB_Info GB (_bind2nd_tran__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
6362.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop #pragma omp target teams distribute dist_schedule(static, 8) for (i = 1; i < _PB_NI - 1; ++i) { #pragma omp for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernal computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). 
*/ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
wpapmk.h
/* * This software is Copyright (c) 2012 Lukas Odzioba <lukas dot odzioba at gmail dot com> * and Copyright (c) 2012-2017 magnum * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, are permitted. * * hccap format was introduced by oclHashcat-plus (now renamed to hashcat), * and it is described here: http://hashcat.net/wiki/hccap * Code is based on Aircrack-ng source */ #ifndef _WPAPMK_H #define _WPAPMK_H #include <stdint.h> #include <assert.h> #if HAVE_OPENSSL_CMAC_H #include <openssl/cmac.h> #endif #include "arch.h" #include "params.h" #include "common.h" #include "johnswap.h" #include "hmacmd5.h" #include "hmac_sha.h" #include "sha2.h" #include "base64_convert.h" #include "hccap.h" #define BINARY_SIZE sizeof(mic_t) #define BINARY_ALIGN 4 #define PLAINTEXT_MIN_LEN 64 #define PLAINTEXT_LENGTH 64 #define SALT_SIZE (sizeof(hccap_t) - sizeof(mic_t)) #define SALT_ALIGN MEM_ALIGN_NONE #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define FORMAT_TAG "$WPAPSK$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) typedef struct { unsigned char keymic[16]; } mic_t; typedef struct { uint32_t length; uint8_t v[PLAINTEXT_LENGTH + 1]; } wpapsk_password; typedef struct { uint32_t v[8]; } wpapsk_hash; typedef struct { uint32_t length; #ifdef JOHN_OCL_WPAPMK uint8_t eapol[256 + 64]; uint32_t eapol_size; uint8_t data[64 + 12]; #endif uint8_t salt[36]; // essid } wpapsk_salt; static struct fmt_tests tests[] = { 
{"$WPAPSK$test#..qHuv0A..ZPYJBRzZwAKpEXUJwpza/b69itFaq4.OWoGHfonpc13zCAUsRIfQN2Zar6EXp2BYcRuSkWEJIWjEJJvb4DWZCspbZ51.21.3zy.EY.6........../zZwAKpEXUJwpza/b69itFaq4.OWoGHfonpc13zCAUsQ..................................................................BoK.31m.E2..31m.U2..31m.U2..31m.U................................................................................................................................................................................/X.....E...AkkDQmDg9837LBHG.dGlKA", "cdd79a5acfb070c7e9d1023b870285d639e430b32f31aa37ac825a55b55524ee"}, {"$WPAPSK$Coherer#..l/Uf7J..qHUXMunTE3nfbMWSwxv27Ua0XutIOrfRSuv9gOCIugIVGlosMyXdNxfBZUAYmgKqeb6GBPxLiIZr56NtWTGR/Cp5ldAk61.5I0.Ec.2...........nTE3nfbMWSwxv27Ua0XutIOrfRSuv9gOCIugIVGlosM.................................................................3X.I.E..1uk0.E..1uk2.E..1uk0....................................................................................................................................................................................../t.....U...8FWdk8OpPckhewBwt4MXYI", "a288fcf0caaacda9a9f58633ff35e8992a01d9c10ba5e02efdf8cb5d730ce7bc"}, #if HAVE_OPENSSL_CMAC_H || defined(JOHN_OCL_WPAPMK) {"$WPAPSK$Neheb#g9a8Jcre9D0WrPnEN4QXDbA5NwAy5TVpkuoChMdFfL/8Dus4i/X.lTnfwuw04ASqHgvo12wJYJywulb6pWM6C5uqiMPNKNe9pkr6LE61.5I0.Eg.2..........1N4QXDbA5NwAy5TVpkuoChMdFfL/8Dus4i/X.lTnfwuw.................................................................3X.I.E..1uk2.E..1uk2.E..1uk4X...................................................................................................................................................................................../t.....k...0sHl.mVkiHW.ryNchcMd4g", "fb57668cd338374412c26208d79aa5c30ce40a110224f3cfb592a8f2e8bf53e8"}, #endif {NULL} }; /** Below are common variables used by wpapmk_fmt.c and opencl_wpapmk_fmt.c **/ static hccap_t hccap; ///structure with hccap data static wpapsk_salt currentsalt; ///structure for essid static mic_t *mic; ///table for MIC keys #ifndef 
JOHN_OCL_WPAPMK static wpapsk_hash *outbuffer; ///table for PMK calculated by GPU #endif /** Below are common functions used by wpapmk_fmt.c and opencl_wpapmk_fmt.c **/ static hccap_t *decode_hccap(char *ciphertext) { static hccap_t hccap; char *essid = ciphertext + FORMAT_TAG_LEN; char *hash = strrchr(ciphertext, '#'); char *d = hccap.essid; char *cap = hash + 1; unsigned char tbuf[sizeof(hccap_t)]; unsigned char *dst = tbuf; int i; memset(&hccap, 0, sizeof(hccap)); if (hash == NULL) return &hccap; while (essid != hash) { ///copy essid to hccap *d++ = *essid++; } *d = '\0'; assert(*essid == '#'); for (i = 0; i < 118; i++) { dst[0] = (atoi64[ARCH_INDEX(cap[0])] << 2) | (atoi64[ARCH_INDEX(cap[1])] >> 4); dst[1] = (atoi64[ARCH_INDEX(cap[1])] << 4) | (atoi64[ARCH_INDEX(cap[2])] >> 2); dst[2] = (atoi64[ARCH_INDEX(cap[2])] << 6) | (atoi64[ARCH_INDEX(cap[3])]); dst += 3; cap += 4; } dst[0] = (atoi64[ARCH_INDEX(cap[0])] << 2) | (atoi64[ARCH_INDEX(cap[1])] >> 4); dst[1] = (atoi64[ARCH_INDEX(cap[1])] << 4) | (atoi64[ARCH_INDEX(cap[2])] >> 2); /* This emits warnings on some compilers */ //memcpy(&hccap.mac1,tbuf,sizeof(hccap_t)-36); memcpy(((char*)&hccap) + 36, tbuf, sizeof(hccap_t) - 36); #if !ARCH_LITTLE_ENDIAN hccap.eapol_size = JOHNSWAP(hccap.eapol_size); hccap.keyver = JOHNSWAP(hccap.keyver); #endif return &hccap; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; uint32_t dummy; } binary; hccap_t *hccap = decode_hccap(ciphertext); memcpy(binary.c, hccap->keymic, BINARY_SIZE); return binary.c; } static void *get_salt(char *ciphertext) { static hccap_t s; memcpy(&s, decode_hccap(ciphertext), SALT_SIZE); return &s; } static int valid(char *ciphertext, struct fmt_main *self) { char *hash; int hashlength = 0; hccap_t *hccap; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0) return 0; hash = strrchr(ciphertext, '#'); if (hash == NULL || hash - (ciphertext + FORMAT_TAG_LEN) > 32) return 0; hash++; while (hash < ciphertext + 
strlen(ciphertext)) { if (atoi64[ARCH_INDEX(*hash++)] == 0x7f) return 0; hashlength++; } if (hashlength != 475) return 0; hccap = decode_hccap(ciphertext); if (strlen(hccap->essid) > 32) /* real life limit */ return 0; if (hccap->eapol_size > 256) return 0; if (hccap->eapol_size < 0) return 0; if (hccap->keyver < 1) return 0; #if HAVE_OPENSSL_CMAC_H || defined(JOHN_OCL_WPAPMK) if (hccap->keyver > 3) return 0; #else if (hccap->keyver > 2) return 0; #endif return 1; } #ifndef JOHN_OCL_WPAPMK static MAYBE_INLINE void prf_512(uint32_t * key, uint8_t * data, uint32_t * ret) { char *text = (char*)"Pairwise key expansion"; unsigned char buff[100]; memcpy(buff, text, 22); memcpy(buff + 23, data, 76); buff[22] = 0; buff[76 + 23] = 0; hmac_sha1((unsigned char*)key, 32, buff, 100, (unsigned char*)ret, 20); } #endif static void insert_mac(uint8_t * data) { int k = memcmp(hccap.mac1, hccap.mac2, 6); if (k > 0) { memcpy(data, hccap.mac2, 6); memcpy(data + 6, hccap.mac1, 6); } else { memcpy(data, hccap.mac1, 6); memcpy(data + 6, hccap.mac2, 6); } } static void insert_nonce(uint8_t * data) { int k = memcmp(hccap.nonce1, hccap.nonce2, 32); if (k > 0) { memcpy(data, hccap.nonce2, 32); memcpy(data + 32, hccap.nonce1, 32); } else { memcpy(data, hccap.nonce1, 32); memcpy(data + 32, hccap.nonce2, 32); } } static void set_salt(void *salt) { memcpy(&hccap, salt, SALT_SIZE); strncpy((char*)currentsalt.salt, hccap.essid, sizeof(currentsalt.salt)); currentsalt.length = strlen(hccap.essid); #ifdef JOHN_OCL_WPAPMK currentsalt.eapol_size = hccap.eapol_size; memcpy(currentsalt.eapol, hccap.eapol, hccap.eapol_size); memset(currentsalt.eapol + hccap.eapol_size, 0x80, 1); memset(currentsalt.eapol + hccap.eapol_size + 1, 0, 256 + 64 - hccap.eapol_size - 1); if (hccap.keyver == 2) alter_endianity(currentsalt.eapol, 256+56); ((unsigned int*)currentsalt.eapol)[16 * ((hccap.eapol_size + 8) / 64) + ((hccap.keyver == 1) ? 
14 : 15)] = (64 + hccap.eapol_size) << 3; insert_mac(currentsalt.data); insert_nonce(currentsalt.data + 12); if (hccap.keyver < 3) alter_endianity(currentsalt.data, 64 + 12); HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE, 0, sizeof(wpapsk_salt), &currentsalt, 0, NULL, NULL), "Copy setting to gpu"); #endif //Debug_hccap(); } #ifndef JOHN_OCL_WPAPMK #if HAVE_OPENSSL_CMAC_H /* Code borrowed from https://w1.fi/wpa_supplicant/ starts */ #define SHA256_MAC_LEN 32 typedef uint16_t u16; typedef uint8_t u8; static inline void WPA_PUT_LE16(u8 *a, u16 val) { a[1] = val >> 8; a[0] = val & 0xff; } static void sha256_vector(size_t num_elem, const u8 *addr[], const size_t *len, u8 *mac) { SHA256_CTX ctx; size_t i; SHA256_Init(&ctx); for (i = 0; i < num_elem; i++) { SHA256_Update(&ctx, addr[i], len[i]); } SHA256_Final(mac, &ctx); } static void hmac_sha256_vector(const u8 *key, size_t key_len, size_t num_elem, const u8 *addr[], const size_t *len, u8 *mac) { unsigned char k_pad[64]; /* padding - key XORd with ipad/opad */ const u8 *_addr[6]; size_t _len[6], i; /* the HMAC_SHA256 transform looks like: * * SHA256(K XOR opad, SHA256(K XOR ipad, text)) * * where K is an n byte key * ipad is the byte 0x36 repeated 64 times * opad is the byte 0x5c repeated 64 times * and text is the data being protected */ /* start out by storing key in ipad */ memset(k_pad, 0, sizeof(k_pad)); memcpy(k_pad, key, key_len); /* XOR key with ipad values */ for (i = 0; i < 64; i++) k_pad[i] ^= 0x36; /* perform inner SHA256 */ _addr[0] = k_pad; _len[0] = 64; for (i = 0; i < num_elem; i++) { _addr[i + 1] = addr[i]; _len[i + 1] = len[i]; } sha256_vector(1 + num_elem, _addr, _len, mac); memset(k_pad, 0, sizeof(k_pad)); memcpy(k_pad, key, key_len); /* XOR key with opad values */ for (i = 0; i < 64; i++) k_pad[i] ^= 0x5c; /* perform outer SHA256 */ _addr[0] = k_pad; _len[0] = 64; _addr[1] = mac; _len[1] = SHA256_MAC_LEN; sha256_vector(2, _addr, _len, mac); } static void sha256_prf_bits(const 
u8 *key, size_t key_len, const char *label, const u8 *data, size_t data_len, u8 *buf, size_t buf_len_bits) { u16 counter = 1; size_t pos, plen; u8 hash[SHA256_MAC_LEN]; const u8 *addr[4]; size_t len[4]; u8 counter_le[2], length_le[2]; size_t buf_len = (buf_len_bits + 7) / 8; addr[0] = counter_le; len[0] = 2; addr[1] = (u8 *) label; len[1] = strlen(label); addr[2] = data; len[2] = data_len; addr[3] = length_le; len[3] = sizeof(length_le); WPA_PUT_LE16(length_le, buf_len_bits); pos = 0; while (pos < buf_len) { plen = buf_len - pos; WPA_PUT_LE16(counter_le, counter); if (plen >= SHA256_MAC_LEN) { hmac_sha256_vector(key, key_len, 4, addr, len, &buf[pos]); pos += SHA256_MAC_LEN; } else { hmac_sha256_vector(key, key_len, 4, addr, len, hash); memcpy(&buf[pos], hash, plen); pos += plen; break; } counter++; } /* * Mask out unused bits in the last octet if it does not use all the * bits. */ if (buf_len_bits % 8) { u8 mask = 0xff << (8 - buf_len_bits % 8); buf[pos - 1] &= mask; } } #endif /* HAVE_OPENSSL_CMAC_H */ /* Code borrowed from https://w1.fi/wpa_supplicant/ ends */ static void wpapsk_postprocess(int keys) { int i; uint8_t data[64 + 12]; insert_mac(data); insert_nonce(data + 12); if (hccap.keyver == 1) { #ifdef _OPENMP #pragma omp parallel for default(none) private(i) shared(keys, outbuffer, data, hccap, mic) #endif for (i = 0; i < keys; i++) { uint32_t prf[20/4]; HMACMD5Context ctx; prf_512(outbuffer[i].v, data, prf); // PTK hmac_md5_init_K16((unsigned char*)prf, &ctx); hmac_md5_update(hccap.eapol, hccap.eapol_size, &ctx); hmac_md5_final(mic[i].keymic, &ctx); } } else if (hccap.keyver == 2) { #ifdef _OPENMP #pragma omp parallel for default(none) private(i) shared(keys, outbuffer, data, hccap, mic) #endif for (i = 0; i < keys; i++) { uint32_t prf[20/4]; prf_512(outbuffer[i].v, data, prf); // PTK hmac_sha1((unsigned char*)prf, 16, hccap.eapol, hccap.eapol_size, mic[i].keymic, 16); } #if HAVE_OPENSSL_CMAC_H } else if (hccap.keyver == 3) { // 802.11w, WPA-PSK-SHA256 
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(keys, outbuffer, data, hccap, mic)
#endif
		for (i = 0; i < keys; i++) {
			unsigned char ptk[48];
			unsigned char cmic[16];
			size_t miclen;
			CMAC_CTX *ctx;

			sha256_prf_bits((unsigned char*)outbuffer[i].v, 32, "Pairwise key expansion", data, 76, ptk, 48 * 8); // PTK

			// Compute MIC
			ctx = CMAC_CTX_new();
			CMAC_Init(ctx, ptk, 16, EVP_aes_128_cbc(), 0);
			CMAC_Update(ctx, hccap.eapol, hccap.eapol_size);
			CMAC_Final(ctx, cmic, &miclen);
			memcpy(mic[i].keymic, cmic, 16);
			CMAC_CTX_free(ctx);
		}
#endif /* HAVE_OPENSSL_CMAC_H */
	}
}
#endif /* #ifndef JOHN_OCL_WPAPMK */

/* get_hash_0..6: return the low PH_MASK_n bits of the first 32-bit word of
 * the computed keymic — the per-granularity hash-table keys John uses to
 * bucket candidate results. */
static int get_hash_0(int index)
{
	uint32_t *h = (uint32_t *) mic[index].keymic;
	return h[0] & PH_MASK_0;
}

static int get_hash_1(int index)
{
	uint32_t *h = (uint32_t *) mic[index].keymic;
	return h[0] & PH_MASK_1;
}

static int get_hash_2(int index)
{
	uint32_t *h = (uint32_t *) mic[index].keymic;
	return h[0] & PH_MASK_2;
}

static int get_hash_3(int index)
{
	uint32_t *h = (uint32_t *) mic[index].keymic;
	return h[0] & PH_MASK_3;
}

static int get_hash_4(int index)
{
	uint32_t *h = (uint32_t *) mic[index].keymic;
	return h[0] & PH_MASK_4;
}

static int get_hash_5(int index)
{
	uint32_t *h = (uint32_t *) mic[index].keymic;
	return h[0] & PH_MASK_5;
}

static int get_hash_6(int index)
{
	uint32_t *h = (uint32_t *) mic[index].keymic;
	return h[0] & PH_MASK_6;
}

/* Quick scan: does any of the `count` computed keymics match the binary's
 * first 32-bit word? (False positives are resolved by cmp_one.) */
static int cmp_all(void *binary, int count)
{
	uint32_t i, b = ((uint32_t *) binary)[0];

	for (i = 0; i < count; i++) {
		uint32_t *m = (uint32_t*) mic[i].keymic;
		if (b == m[0])
			return 1;
	}
	return 0;
}

/* Full BINARY_SIZE comparison of one candidate's keymic against binary. */
static int cmp_one(void *binary, int index)
{
	uint8_t i;
	uint32_t *b = (uint32_t*) binary;
	uint32_t *m = (uint32_t*) mic[index].keymic;

	for (i = 0; i < BINARY_SIZE / 4; i++)
		if (b[i] != m[i])
			return 0;
	return 1;
}

/* No further verification beyond cmp_one for this format. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* qsort-style salt comparator: order primarily by the first 36 bytes
 * (presumably the ESSID field — confirm against the hccap layout), falling
 * back to a full SALT_SIZE memcmp. */
static int salt_compare(const void *x, const void *y)
{
	int c = strncmp((const char*)x, (const char*)y, 36);
	if (c)
		return c;
	return memcmp((const char*)x, (const char*)y, SALT_SIZE);
}

/*
 * key version as first tunable cost
 * 1=WPA (MD5)
 * 2=WPA2 (SHA1)
 * 3=802.11w (SHA256)
 */
static unsigned int get_keyver(void *salt)
{
	hccap_t *my_salt = salt;

	return (unsigned int) my_salt->keyver;
}
#endif
pmtv-OpenMP.c
/* gcc -fopenmp -O2 src/pmtv-OpenMP.c -o bin/pmtv-OpenMP
 *
 * Multiplies an upper-triangular NxN random matrix by a random vector
 * (v2 = M * v1) in parallel with OpenMP, timing only the product loop.
 *
 * Usage: pmtv-OpenMP <N_filas> <Chunk (0...I | "default")> <Sched (static|dynamic|guided)>
 */
#include <stdlib.h>   // atoi(), malloc(), free(), exit(), rand(), srand()
#include <stdio.h>    // printf(), fprintf()
#include <string.h>   // strcmp() (missing in the original)
#include <time.h>     // time(), time_t (missing in the original)

#ifdef _OPENMP
#include <omp.h>
#else
/* Minimal stubs so the program still builds (and runs sequentially) without
 * OpenMP. The original #else defined syntactically invalid macros
 * ("#define omp_set_dynamic(0);"), so a non-OpenMP build could never compile. */
typedef enum {
    omp_sched_static = 1,
    omp_sched_dynamic = 2,
    omp_sched_guided = 3
} omp_sched_t;
static void omp_set_num_threads(int n) { (void)n; }
static int omp_get_num_procs(void) { return 1; }
static void omp_set_schedule(omp_sched_t kind, int chunk) { (void)kind; (void)chunk; }
static void omp_get_schedule(omp_sched_t *kind, int *chunk) { *kind = omp_sched_static; *chunk = 0; }
static double omp_get_wtime(void) { return 0.0; } /* timing unavailable without OpenMP */
#endif

int main(int argc, char ** argv){
    int **M;              /* NxN matrix; only the upper triangle is non-zero */
    int *v1, *v2;         /* input vector and result vector (v2 = M * v1)   */
    int i, k, a, N;
    double cgt1, cgt2, ncgt; /* wall-clock timing of the product loop */
    time_t t;

    /* Seed rand(). NOTE(review): rand() is also called from inside parallel
     * regions below; it is not thread-safe, which is tolerated here because
     * the fills only need "some" pseudo-random values, not reproducibility. */
    srand((unsigned) time(&t));

    if(argc < 4){
        fprintf(stderr,"Error: %s <N_filas> <Chunk (0...I)> <Sched (static, dynamic, guided)>\n", argv[0]);
        exit(-1);
    }
    N = atoi(argv[1]);

    // == Directivas de OpenMP
    // ====================================================>
    int chunk = 0;
    omp_sched_t kind;
    if(strcmp(argv[2], "default") == 0)
        omp_get_schedule(&kind, &chunk);  /* keep the runtime's chunk size */
    else
        chunk = atoi(argv[2]);

    /* Select the schedule used by every "schedule(runtime)" loop below.
     * Named omp_sched_* constants replace the original magic numbers 1/2/3. */
    if(strcmp(argv[3], "static") == 0)
        omp_set_schedule(omp_sched_static, chunk);
    else if(strcmp(argv[3], "dynamic") == 0)
        omp_set_schedule(omp_sched_dynamic, chunk);
    else if(strcmp(argv[3], "guided") == 0)
        omp_set_schedule(omp_sched_guided, chunk);
    else {
        printf("Error en el metodo de asignacion de trabajo a las hebras (static, dynamic, guided)\n");
        exit(-1);
    }

    // El numero de hebras que se vayan a usar debe ser el mismo que el numero de procesadores disponibles
    omp_set_num_threads(omp_get_num_procs());

    // == Reserva de Memoria
    // ====================================================>
    v1 = (int *) malloc(N*sizeof(int));
    v2 = (int *) malloc(N*sizeof(int));
    if ( v1 == NULL || v2 == NULL ){
        printf("Error en la reserva de espacio para los vectores\n");
        exit(-2);
    }

    M = (int**) malloc (N*sizeof(int*));
    if ( M == NULL ){  /* the original never checked the row-pointer array */
        printf("Error en la reserva de espacio para los vectores\n");
        exit(-2);
    }

    /* Allocate each row in parallel. NOTE(review): exit() inside a parallel
     * region is not strictly conforming OpenMP; kept from the original. */
    #pragma omp parallel for shared(M,N) private(i) default(none) schedule(runtime)
    for(i = 0; i<N; i++){
        M[i] = (int*) malloc (N*sizeof(int));
        if( M[i] == NULL ){
            printf("Error en la reserva de espacio para los vectores\n");
            exit(-2);
        }
    }

    // == Inicializacion
    // ====================================================>
    /* Fill only the upper triangle with random values in [0,8); everything
     * below the diagonal is zero. i is the parallel index; k and a are
     * private scratch indices per thread. */
    #pragma omp parallel for shared(N,M) private(i,k,a) default(none) schedule(runtime)
    for(i = 0; i<N; i++){
        if(i>0){
            for(a = 0; a<i; a++)
                M[i][a] = 0;
            for(k = a; k<N; k++)   /* a == i here: start at the diagonal */
                M[i][k] = rand() % 8;
        } else {
            for(k = 0; k<N; k++){
                M[i][k] = rand() % 8;
            }
        }
    }

    #pragma omp parallel for shared(v1,v2,N) private(i) default(none) schedule(runtime)
    for(i = 0; i<N; i++){
        v1[i] = rand() % 6;
        v2[i] = 0;
    }

    // == Calculo
    // ====================================================>
    cgt1 = omp_get_wtime();
    #pragma omp parallel for shared(v1,v2,M,N) private(i,k) default(none) schedule(runtime)
    for(i = 0; i<N; i++){
        /* Only k >= i can be non-zero (upper-triangular matrix). */
        for(k = i; k<N; k++)
            v2[i] += M[i][k] * v1[k];
    }
    cgt2 = omp_get_wtime();
    ncgt = (double)(cgt2 - cgt1);

    // == Imprimir Mensajes
    // ====================================================>
    /* Format specifiers fixed: %d for int arguments, %zu for size_t products
     * (the original %u/%lu mismatches are undefined behavior). */
    printf("Tiempo(seg.):%11.9f\n", ncgt);
    printf("Tamaño de los vectores: %d\n", N);
    printf("\tv1 = %dElem -> %zu bytes\n\tv2 = %dElem -> %zu bytes\n",
           N, N*sizeof(int), N, N*sizeof(int));
    printf("Tamaño de la matriz: %dx%d -> %zu bytes\n", N, N, N*N*sizeof(int));
    // Imprimir el primer y último componente del resultado evita que las optimizaciones del compilador
    // eliminen el código de la suma.
    printf("v2[0] = %d ... v2[N-1] = %d \n", v2[0], v2[N-1]);

    // Para tamaños pequeños de N < 15 mostrar los valores calculados
    if(N < 15){
        printf("\n----------- Matriz M ----------- \n");
        for(i = 0; i<N; i++){
            for(k = 0; k<N; k++)
                printf("%d\t", M[i][k]);
            printf("\n");
        }
        printf("\n----------- Vector V1 ----------- \n");
        for(i = 0; i<N; i++)
            printf("%d\t", v1[i]);
        printf("\n");
        printf("\n----------- Vector V2----------- \n");
        for(i = 0; i<N; i++)
            printf("%d\t", v2[i]);
        printf("\n");
    }

    // == Liberar Memoria
    // ====================================================>
    free(v1);
    free(v2);
    #pragma omp parallel for shared(M,N) private(i) default(none) schedule(runtime)
    for(i = 0; i<N; i++)
        free(M[i]);
    free(M);

    return 0;
}
functions.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include "omp.h" #include "functions.h" //compute a*b mod p safely unsigned int modprod(unsigned int a, unsigned int b, unsigned int p) { unsigned int za = a; unsigned int ab = 0; while (b > 0) { if (b%2 == 1) ab = (ab + za) % p; za = (2 * za) % p; b /= 2; } return ab; } //compute a^b mod p safely unsigned int modExp(unsigned int a, unsigned int b, unsigned int p) { unsigned int z = a; unsigned int aExpb = 1; while (b > 0) { if (b%2 == 1) aExpb = modprod(aExpb, z, p); z = modprod(z, z, p); b /= 2; } return aExpb; } //returns either 0 or 1 randomly unsigned int randomBit() { return rand()%2; } //returns a random integer which is between 2^{n-1} and 2^{n} unsigned int randXbitInt(unsigned int n) { unsigned int r = 1; for (unsigned int i=0; i<n-1; i++) { r = r*2 + randomBit(); } return r; } //tests for primality and return 1 if N is probably prime and 0 if N is composite unsigned int isProbablyPrime(unsigned int N) { if (N%2==0) return 0; //not interested in even numbers (including 2) unsigned int NsmallPrimes = 168; unsigned int smallPrimeList[168] = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997}; //before using a probablistic primality 
check, check directly using the small primes list for (unsigned int n=1;n<NsmallPrimes;n++) { if (N==smallPrimeList[n]) return 1; //true if (N%smallPrimeList[n]==0) return 0; //false } //if we're testing a large number switch to Miller-Rabin primality test unsigned int r = 0; unsigned int d = N-1; while (d%2 == 0) { d /= 2; r += 1; } for (unsigned int n=0;n<NsmallPrimes;n++) { unsigned int k = smallPrimeList[n]; unsigned int x = modExp(k,d,N); if ((x==1) || (x==N-1)) continue; for (unsigned int i=1;i<r-1;i++) { x = modprod(x,x,N); if (x == 1) return 0; //false if (x == N-1) break; } // see whether we left the loop becasue x==N-1 if (x == N-1) continue; return 0; //false } return 1; //true } //Finds a generator of Z_p using the assumption that p=2*q+1 unsigned int findGenerator(unsigned int p) { unsigned int g; unsigned int q = (p-1)/2; do { //make a random number 1<= g < p g = randXbitInt(32)%p; //could also have passed n to findGenerator } while (g==0 || (modExp(g,q,p)==1) || (modExp(g,2,p)==1)); return g; } void setupElGamal(unsigned int n, unsigned int *p, unsigned int *g, unsigned int *h, unsigned int *x) { /* Use isProbablyPrime and randomXbitInt to find a new random n-bit prime number which satisfies p=2*q+1 where q is also prime */ unsigned int q; do { *p = randXbitInt(n); q = (*p-1)/2; } while (!isProbablyPrime(*p) || !isProbablyPrime(q)); /* Use the fact that p=2*q+1 to quickly find a generator */ *g = findGenerator(*p); //pick a secret key, x *x = randXbitInt(n)%(*p); //compute h *h = modExp(*g,*x,*p); printf("ElGamal Setup successful.\n"); printf("p = %u. 
\n", *p); printf("g = %u is a generator of Z_%u \n", *g, *p); printf("Secret key: x = %u \n", *x); printf("h = g^x = %u\n", *h); printf("\n"); } void ElGamalEncrypt(unsigned int *m, unsigned int *a, unsigned int Nints, unsigned int p, unsigned int g, unsigned int h) { /* Q2.1 Parallelize this function with OpenMP */ #pragma omp parallel for for (unsigned int i=0; i<Nints;i++) { //pick y in Z_p randomly unsigned int y; do { y = randXbitInt(32)%p; } while (y==0); //dont allow y=0 //compute a = g^y a[i] = modExp(g,y,p); //compute s = h^y unsigned int s = modExp(h,y,p); //encrypt m by multiplying with s m[i] = modprod(m[i],s,p); } } void ElGamalDecrypt(unsigned int *m, unsigned int *a, unsigned int Nints, unsigned int p, unsigned int x) { /* Q2.1 Parallelize this function with OpenMP */ #pragma omp parallel for for (unsigned int i=0; i<Nints;i++) { //compute s = a^x unsigned int s = modExp(a[i],x,p); //compute s^{-1} = s^{p-2} unsigned int invS = modExp(s,p-2,p); //decrypt message by multplying by invS m[i] = modprod(m[i],invS,p); } } //Pad the end of string so its length is divisible by Nchars // Assume there is enough allocated storage for the padded string void padString(unsigned char* string, unsigned int charsPerInt) { /* Q1.2 Complete this function */ int i = 1; while(strlen(string)%charsPerInt != 0) { *(string +strlen(string)) = ' '; i++; } *(string + strlen(string)) = '\0'; } void convertStringToZ(unsigned char *string, unsigned int Nchars, unsigned int *Z, unsigned int Nints) { unsigned int charsPerInt = Nchars/Nints; /* Q1.3 Complete this function */ /* Q2.2 Parallelize this function with OpenMP */ #pragma omp parallel for for(int i = 0; i < Nints; i++) { for (int j = 1; j < charsPerInt; j++) { int nxtChar = string[i*(charsPerInt)+j] *256; Z[i] |= Z[i] + nxtChar; } //end inner for loop } // end outer for } // end convertStringToZ function void convertZToString(unsigned int *Z, unsigned int Nints, unsigned char *string, unsigned int Nchars) { unsigned int 
charsPerInt = Nchars/Nints; /* Q1.4 Complete this function */ /* Q2.2 Parallelize this function with OpenMP */ #pragma omp parallel for for(int i = 0; i < Nints; i++) { for (int j = 1; j < charsPerInt; j++) { Z[i] = Z[i] / 256; string[i*(charsPerInt)+j] = Z[i] & 0xFF; } //end inner for loop } // end outer for } // end convertZToString method
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include 
<deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class 
MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes 
to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. 
bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. 
SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support 
// #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. 
class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This represents the stack of attributes that were pushed by /// \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; SmallVector<PragmaAttributeEntry, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. 
SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a list of either DeclRefExprs or MemberExprs /// that contain a reference to a variable (constant) that may or may not /// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue /// and discarded value conversions have been applied to all subexpressions /// of the enclosing full expression. This is cleared at the end of each /// full expression. llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> PreallocatedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. 
typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. 
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
  DelayedExceptionSpecChecks;

/// All the members seen during a class definition which were both
/// explicitly defaulted and had explicitly-specified exception
/// specifications, along with the function type containing their
/// user-specified exception specification. Those exception specifications
/// were overridden with the default specifications, but we still need to
/// check whether they are compatible with the default specification, and
/// we can't do that until the nesting set of class definitions is complete.
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
  DelayedDefaultedMemberExceptionSpecs;

typedef llvm::MapVector<const FunctionDecl *,
                        std::unique_ptr<LateParsedTemplate>>
    LateParsedTemplateMapT;
/// Function templates whose bodies were deferred for late parsing, keyed
/// by the template's function declaration.
LateParsedTemplateMapT LateParsedTemplateMap;

/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;

/// Register the parser's late-template-parsing callbacks and the opaque
/// context pointer \p P that is handed back to both callbacks.
void SetLateTemplateParser(LateTemplateParserCB *LTP,
                           LateTemplateParserCleanupCB *LTPCleanup,
                           void *P) {
  LateTemplateParser = LTP;
  LateTemplateParserCleanup = LTPCleanup;
  OpaqueParser = P;
}

class DelayedDiagnostics;

/// Opaque saved state for DelayedDiagnostics::push()/pushUndelayed();
/// restored by the matching pop functions.
class DelayedDiagnosticsState {
  sema::DelayedDiagnosticPool *SavedPool;
  friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;

/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
  /// The current pool of diagnostics into which delayed
  /// diagnostics should go.
  sema::DelayedDiagnosticPool *CurPool;

public:
  DelayedDiagnostics() : CurPool(nullptr) {}

  /// Adds a delayed diagnostic.
  void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h

  /// Determines whether diagnostics should be delayed.
  bool shouldDelayDiagnostics() { return CurPool != nullptr; }

  /// Returns the current delayed-diagnostics pool.
  sema::DelayedDiagnosticPool *getCurrentPool() const {
    return CurPool;
  }

  /// Enter a new scope. Access and deprecation diagnostics will be
  /// collected in this pool.
  DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
    DelayedDiagnosticsState state;
    state.SavedPool = CurPool;
    CurPool = &pool;
    return state;
  }

  /// Leave a delayed-diagnostic state that was previously pushed.
  /// Do not emit any of the diagnostics. This is performed as part
  /// of the bookkeeping of popping a pool "properly".
  void popWithoutEmitting(DelayedDiagnosticsState state) {
    CurPool = state.SavedPool;
  }

  /// Enter a new scope where access and deprecation diagnostics are
  /// not delayed.
  DelayedDiagnosticsState pushUndelayed() {
    DelayedDiagnosticsState state;
    state.SavedPool = CurPool;
    CurPool = nullptr;
    return state;
  }

  /// Undo a previous pushUndelayed().
  void popUndelayed(DelayedDiagnosticsState state) {
    assert(CurPool == nullptr);
    CurPool = state.SavedPool;
  }
} DelayedDiagnostics;

/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
  Sema &S;
  DeclContext *SavedContext;          // context to restore in pop(); null once popped
  ProcessingContextState SavedContextState;
  QualType SavedCXXThisTypeOverride;  // saved S.CXXThisTypeOverride

public:
  /// Push \p ContextToPush as Sema's current DeclContext. Delayed
  /// diagnostics are suspended for the duration (pushUndelayed), and the
  /// 'this' type override is cleared unless \p NewThisContext is false.
  ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
    : S(S), SavedContext(S.CurContext),
      SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
      SavedCXXThisTypeOverride(S.CXXThisTypeOverride) {
    assert(ContextToPush && "pushing null context");
    S.CurContext = ContextToPush;
    if (NewThisContext)
      S.CXXThisTypeOverride = QualType();
  }

  /// Restore the saved context and diagnostic state. Safe to call early;
  /// subsequent calls (including from the destructor) are no-ops.
  void pop() {
    if (!SavedContext) return;
    S.CurContext = SavedContext;
    S.DelayedDiagnostics.popUndelayed(SavedContextState);
    S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
    SavedContext = nullptr;
  }

  ~ContextRAII() {
    pop();
  }
};

/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
  Sema &S;
  Sema::ContextRAII SavedContext;
  bool PushedCodeSynthesisContext = false;

public:
  SynthesizedFunctionScope(Sema &S, DeclContext *DC)
      : S(S), SavedContext(S, DC) {
    S.PushFunctionScope();
    S.PushExpressionEvaluationContext(
        Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
    if (auto *FD = dyn_cast<FunctionDecl>(DC))
      FD->setWillHaveBody(true);
    else
      assert(isa<ObjCMethodDecl>(DC));
  }

  /// Record \p UseLoc as the point of instantiation so diagnostics emitted
  /// while synthesizing the body carry a code-synthesis context note.
  void addContextNote(SourceLocation UseLoc) {
    assert(!PushedCodeSynthesisContext);

    Sema::CodeSynthesisContext Ctx;
    Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
    Ctx.PointOfInstantiation = UseLoc;
    Ctx.Entity = cast<Decl>(S.CurContext);
    S.pushCodeSynthesisContext(Ctx);

    PushedCodeSynthesisContext = true;
  }

  ~SynthesizedFunctionScope() {
    if (PushedCodeSynthesisContext)
      S.popCodeSynthesisContext();
    if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
      FD->setWillHaveBody(false);
    S.PopExpressionEvaluationContext();
    S.PopFunctionScopeInfo();
  }
};

/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;

/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;

/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();

/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;

IdentifierResolver IdResolver;

/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;

/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;

/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;

/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;

/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;

/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;

/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;

/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;

/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;

/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;

/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;

/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;

/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;

/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;

/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];

/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;

/// Pointer to NSString type (NSString *).
QualType NSStringPointer;

/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;

/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;

/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;

/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;

/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;

/// id<NSCopying> type.
QualType QIDNSCopying;

/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;

/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;

/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a
bool AllowAbstractFieldReference;

/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
  /// The current expression and its subexpressions occur within an
  /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
  /// \c sizeof, where the type of the expression may be significant but
  /// no code will be generated to evaluate the value of the expression at
  /// run time.
  Unevaluated,

  /// The current expression occurs within a braced-init-list within
  /// an unevaluated operand. This is mostly like a regular unevaluated
  /// context, except that we still instantiate constexpr functions that are
  /// referenced here so that we can perform narrowing checks correctly.
  UnevaluatedList,

  /// The current expression occurs within a discarded statement.
  /// This behaves largely similarly to an unevaluated operand in preventing
  /// definitions from being required, but not in other ways.
  DiscardedStatement,

  /// The current expression occurs within an unevaluated
  /// operand that unconditionally permits abstract references to
  /// fields, such as a SIZE operator in MS-style inline assembly.
  UnevaluatedAbstract,

  /// The current context is "potentially evaluated" in C++11 terms,
  /// but the expression is evaluated at compile-time (like the values of
  /// cases in a switch statement).
  ConstantEvaluated,

  /// The current expression is potentially evaluated at run time,
  /// which means that code may be generated to evaluate the value of the
  /// expression at run time.
  PotentiallyEvaluated,

  /// The current expression is potentially evaluated, but any
  /// declarations referenced inside that expression are only used if
  /// in fact the current expression is used.
  ///
  /// This value is used when parsing default function arguments, for which
  /// we would like to provide diagnostics (e.g., passing non-POD arguments
  /// through varargs) but do not want to mark declarations as "referenced"
  /// until the default argument is used.
  PotentiallyEvaluatedIfUsed
};

/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
  /// The expression evaluation context.
  ExpressionEvaluationContext Context;

  /// Whether the enclosing context needed a cleanup.
  CleanupInfo ParentCleanup;

  /// Whether we are in a decltype expression.
  bool IsDecltype;

  /// The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;

  /// The number of typos encountered during this expression evaluation
  /// context (i.e. the number of TypoExprs created).
  unsigned NumTypos;

  llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;

  /// The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;

  /// The declaration that provides context for lambda expressions
  /// and block literals if the normal declaration context does not
  /// suffice, e.g., in a default function argument.
  Decl *ManglingContextDecl;

  /// The context information used to mangle lambda expressions
  /// and block literals within this context.
  ///
  /// This mangling information is allocated lazily, since most contexts
  /// do not have lambda expressions or block literals.
  std::unique_ptr<MangleNumberingContext> MangleNumbering;

  /// If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

  /// If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  /// \brief Describes whether we are in an expression context which we have
  /// to handle differently.
  enum ExpressionKind {
    EK_Decltype, EK_TemplateArgument, EK_Other
  } ExprContext;

  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    CleanupInfo ParentCleanup,
                                    Decl *ManglingContextDecl,
                                    ExpressionKind ExprContext)
    : Context(Context), ParentCleanup(ParentCleanup),
      NumCleanupObjects(NumCleanupObjects), NumTypos(0),
      ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
      ExprContext(ExprContext) {}

  /// Retrieve the mangling numbering context, used to consistently
  /// number constructs like lambdas for mangling.
  MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);

  bool isUnevaluated() const {
    return Context == ExpressionEvaluationContext::Unevaluated ||
           Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
           Context == ExpressionEvaluationContext::UnevaluatedList;
  }
  bool isConstantEvaluated() const {
    return Context == ExpressionEvaluationContext::ConstantEvaluated;
  }
};

/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

/// Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
  const DeclContext *DC, Decl *&ManglingContextDecl);

/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
  enum Kind {
    NoMemberOrDeleted,
    Ambiguous,
    Success
  };

private:
  llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

public:
  SpecialMemberOverloadResult() : Pair() {}
  SpecialMemberOverloadResult(CXXMethodDecl *MD)
      : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};

/// A FoldingSet node wrapping a SpecialMemberOverloadResult, so that
/// results can be cached in SpecialMemberCache below.
class SpecialMemberOverloadResultEntry
    : public llvm::FastFoldingSetNode,
      public SpecialMemberOverloadResult {
public:
  SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
    : FastFoldingSetNode(ID)
  {}
};

/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;

llvm::BumpPtrAllocator BumpAlloc;

/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;

typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
  UnparsedDefaultArgInstantiationsMap;

/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;

/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);

/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
    SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;

/// A pair of method lists; see MethodPool below for how the two lists are
/// used for a given selector.
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;

/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;

/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

/// Kinds of C++ special members.
enum CXXSpecialMember {
  CXXDefaultConstructor,
  CXXCopyConstructor,
  CXXMoveConstructor,
  CXXCopyAssignment,
  CXXMoveAssignment,
  CXXDestructor,
  CXXInvalid
};

typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
    SpecialMemberDecl;

/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);

/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);

/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
  FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
  ~FPContractStateRAII() {
    S.FPFeatures = OldFPFeaturesState;
  }

private:
  Sema& S;
  FPOptions OldFPFeaturesState;
};

void addImplicitTypedef(StringRef Name, QualType T);

public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
     TranslationUnitKind TUKind = TU_Complete,
     CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();

/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();

const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }

DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }

/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
void addExternalSource(ExternalSemaSource *E);

void PrintStats() const;

/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
  Sema &SemaRef;
  unsigned DiagID;

public:
  SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
    : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

  // This is a cunning lie. DiagnosticBuilder actually performs move
  // construction in its copy constructor (but due to varied uses, it's not
  // possible to conveniently express this as actual move construction). So
  // the default copy ctor here is fine, because the base class disables the
  // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
  // in that case anyway.
  SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;

  ~SemaDiagnosticBuilder() {
    // If we aren't active, there is nothing to do.
    if (!isActive()) return;

    // Otherwise, we need to emit the diagnostic. First flush the underlying
    // DiagnosticBuilder data, and clear the diagnostic builder itself so it
    // won't emit the diagnostic in its own destructor.
    //
    // This seems wasteful, in that as written the DiagnosticBuilder dtor will
    // do its own needless checks to see if the diagnostic needs to be
    // emitted. However, because we take care to ensure that the builder
    // objects never escape, a sufficiently smart compiler will be able to
    // eliminate that code.
    FlushCounts();
    Clear();

    // Dispatch to Sema to emit the diagnostic.
    SemaRef.EmitCurrentDiagnostic(DiagID);
  }

  /// Teach operator<< to produce an object of the correct type.
  template<typename T>
  friend const SemaDiagnosticBuilder &operator<<(
      const SemaDiagnosticBuilder &Diag, const T &Value) {
    const DiagnosticBuilder &BaseDiag = Diag;
    BaseDiag << Value;
    return Diag;
  }
};

/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(DB, *this, DiagID);
}

/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

bool findMacroSpelling(SourceLocation &loc, StringRef name);

/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;

void emitAndClearUnusedLocalTypedefWarnings();

void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();

void CheckDelegatingCtorCycles();

Scope *getScopeForContext(DeclContext *Ctx);

void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();

/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);

void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                             RecordDecl *RD,
                             CapturedRegionKind K);
void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                     const Decl *D = nullptr,
                     const BlockExpr *blkExpr = nullptr);

sema::FunctionScopeInfo *getCurFunction() const {
  return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}

sema::FunctionScopeInfo *getEnclosingFunction() const;

void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();

void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();

sema::CompoundScopeInfo &getCurCompoundScope() const;

bool hasAnyUnrecoverableErrorsInThisFunction() const;

/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();

/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);

/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();

/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();

/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

void ActOnComment(SourceRange Comment);

//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//

QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                            const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                            const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
                          SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
                            SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                        Expr *ArraySize, unsigned Quals,
                        SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                            SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
                               SourceLocation AttrLoc);

bool CheckFunctionReturnType(QualType T, SourceLocation Loc);

/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
                           MutableArrayRef<QualType> ParamTypes,
                           SourceLocation Loc, DeclarationName Entity,
                           const FunctionProtoType::ExtProtoInfo &EPI);

QualType BuildMemberPointerType(QualType T, QualType Class,
                                SourceLocation Loc,
                                DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
                               SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
                           SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
                            SourceLocation Loc);

TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
                                               TypeSourceInfo *ReturnTypeInfo);

/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
                                  TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
                                              const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
                         const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
    const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
                              const PartialDiagnostic &NestedDiagID,
                              const PartialDiagnostic &NoteID,
                              const FunctionProtoType *Superset,
                              SourceLocation SuperLoc,
                              const FunctionProtoType *Subset,
                              SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
                             const PartialDiagnostic &NoteID,
                             const FunctionProtoType *Target,
                             SourceLocation TargetLoc,
                             const FunctionProtoType *Source,
                             SourceLocation SourceLoc);

TypeResult ActOnTypeName(Scope *S, Declarator &D);

/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);

/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
  TypeDiagnoser() {}

  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() {}
};

// getPrintable overloads normalize arbitrary argument types into forms
// the diagnostic streaming operator accepts (used by BoundTypeDiagnoser).
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}

template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
  unsigned DiagID;
  std::tuple<const Ts &...> Args;

  template <std::size_t... Is>
  void emit(const SemaDiagnosticBuilder &DB,
            llvm::index_sequence<Is...>) const {
    // Apply all tuple elements to the builder in order.
    bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
    (void)Dummy;
  }

public:
  BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
    assert(DiagID != 0 && "no diagnostic for type diagnoser");
  }

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, llvm::index_sequence_for<Ts...>());
    DB << T;
  }
};

private:
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                             TypeDiagnoser *Diagnoser);

struct ModuleScope {
  clang::Module *Module = nullptr;
  bool ModuleInterface = false;
  VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;

/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
  return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}

VisibleModuleSet VisibleModules;

public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }

/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);

bool isModuleVisible(const Module *M) { return VisibleModules.isVisible(M); }

/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  return !D->isHidden() || isVisibleSlow(D);
}

/// Determine whether any declaration of an entity is visible.
bool hasVisibleDeclaration(const NamedDecl *D,
                           llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules);

bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                          bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
  NamedDecl *Hidden;
  return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}

/// Determine if the template parameter \p D has a visible default argument.
bool hasVisibleDefaultArgument(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
                                            const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
    SourceLocation Loc, const NamedDecl *D,
    ArrayRef<const NamedDecl *> Equiv);

/// Determine whether \p T is complete at \p Loc without emitting any
/// diagnostic (the null diagnoser tells RequireCompleteTypeImpl to stay
/// quiet).
bool isCompleteType(SourceLocation Loc, QualType T) {
  return !RequireCompleteTypeImpl(Loc, T, nullptr);
}

/// Require \p T to be a complete type at \p Loc, diagnosing via
/// \p Diagnoser (or plain \p DiagID) if it is not. Returns true on error.
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID);

/// Convenience form: bundles \p DiagID plus extra printable arguments into
/// a BoundTypeDiagnoser and forwards to the diagnoser overload.
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, Diagnoser);
}

void completeExprArrayBound(Expr *E);

/// As RequireCompleteType, but applied to the type of expression \p E.
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);

template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, Diagnoser);
}

/// Require \p T to be a literal type, diagnosing if it is not.
bool RequireLiteralType(SourceLocation Loc, QualType T,
                        TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireLiteralType(Loc, T, Diagnoser);
}

QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                           const CXXScopeSpec &SS, QualType T,
                           TagDecl *OwnedTagDecl = nullptr);

QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
                           bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
                                 UnaryTransformType::UTTKind UKind,
                                 SourceLocation Loc);

//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
// struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). 
/// The kinds of results ClassifyName() can produce.
enum NameClassificationKind {
  NC_Unknown,
  NC_Error,
  NC_Keyword,
  NC_Type,
  NC_Expression,
  NC_NestedNameSpecifier,
  NC_TypeTemplate,
  NC_VarTemplate,
  NC_FunctionTemplate
};

/// A discriminated result describing how a name was classified: as a type,
/// an expression, a template name, and so on. Constructed either implicitly
/// from a result value or via the static factory functions below; only the
/// member matching the kind is meaningful.
class NameClassification {
  NameClassificationKind Kind;
  ExprResult Expr;       // Valid when Kind == NC_Expression.
  TemplateName Template; // Valid for the NC_*Template kinds.
  ParsedType Type;       // Valid when Kind == NC_Type.

  explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

public:
  NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}

  NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

  // The keyword itself is not stored; only the classification matters.
  NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}

  static NameClassification Error() {
    return NameClassification(NC_Error);
  }

  static NameClassification Unknown() {
    return NameClassification(NC_Unknown);
  }

  static NameClassification NestedNameSpecifier() {
    return NameClassification(NC_NestedNameSpecifier);
  }

  static NameClassification TypeTemplate(TemplateName Name) {
    NameClassification Result(NC_TypeTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification VarTemplate(TemplateName Name) {
    NameClassification Result(NC_VarTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification FunctionTemplate(TemplateName Name) {
    NameClassification Result(NC_FunctionTemplate);
    Result.Template = Name;
    return Result;
  }

  NameClassificationKind getKind() const { return Kind; }

  ParsedType getType() const {
    assert(Kind == NC_Type);
    return Type;
  }

  ExprResult getExpression() const {
    assert(Kind == NC_Expression);
    return Expr;
  }

  TemplateName getTemplateName() const {
    assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
           Kind == NC_VarTemplate);
    return Template;
  }

  /// Map the template classification kinds onto TemplateNameKind; asserts
  /// on any non-template kind.
  TemplateNameKind getTemplateNameKind() const {
    switch (Kind) {
    case NC_TypeTemplate:
      return TNK_Type_template;
    case NC_FunctionTemplate:
      return TNK_Function_template;
    case NC_VarTemplate:
      return TNK_Var_template;
    default:
      llvm_unreachable("unsupported name classification.");
    }
  }
};

/// Perform name lookup on the given name, classifying it based on
/// the results of name
lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. 
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
  // Only C++ has template names, and an invalid expression tells us nothing.
  if (!getLangOpts().CPlusPlus || E.isInvalid())
    return false;
  Dependent = false;
  // Non-dependent references: plausible only if the reference does not
  // already carry explicit template arguments.
  if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
    return !DRE->hasExplicitTemplateArgs();
  if (auto *ME = dyn_cast<MemberExpr>(E.get()))
    return !ME->hasExplicitTemplateArgs();
  // Dependent-scope references: same test, but flag that the determination
  // was made in a dependent context.
  Dependent = true;
  if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
    return !DSDRE->hasExplicitTemplateArgs();
  if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
    return !DSME->hasExplicitTemplateArgs();
  // Any additional cases recognized here should also be handled by
  // diagnoseExprIntendedAsTemplateName.
  return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
                                        SourceLocation Less,
                                        SourceLocation Greater);

Decl *ActOnDeclarator(Scope *S, Declarator &D);

NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                            MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                  DeclarationName Name, SourceLocation Loc,
                                  bool IsTemplateId);
void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
                               SourceLocation FallbackLoc,
                               SourceLocation ConstQualLoc = SourceLocation(),
                               SourceLocation VolatileQualLoc = SourceLocation(),
                               SourceLocation RestrictQualLoc = SourceLocation(),
                               SourceLocation AtomicQualLoc = SourceLocation(),
                               SourceLocation UnalignedQualLoc = SourceLocation());

static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
                                  const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
                 const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);

/// Warn if 'E', which is an expression that is
about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); 
bool CheckConstexprFunctionDecl(const FunctionDecl *FD); bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void 
SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). 
bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' Partition, ///< 'module partition X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path); /// The parser has processed a module import declaration. /// /// \param AtLoc The location of the '@' symbol, if any. /// /// \param ImportLoc The location of the 'import' keyword. /// /// \param Path The module access path. 
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc, ModuleIdPath Path); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parsed has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. 
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);

/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
                                          NamedDecl *Spec);

/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
  // Delegates to the static overload using this Sema's own context and
  // preprocessor.
  return getPrintingPolicy(Context, PP);
}

/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                        const Preprocessor &PP);

/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);

Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                 RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                 MultiTemplateParamsArg TemplateParams,
                                 bool IsExplicitInstantiation,
                                 RecordDecl *&AnonRecord);

Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS,
                                  RecordDecl *Record,
                                  const PrintingPolicy &Policy);

Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                     RecordDecl *Record);

/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
  NTK_NonStruct,
  NTK_NonClass,
  NTK_NonUnion,
  NTK_NonEnum,
  NTK_Typedef,
  NTK_TypeAlias,
  NTK_Template,
  NTK_TypeAliasTemplate,
  NTK_TemplateTemplateArgument,
};

/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl 
*CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. 
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. 
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// Make the given externally-produced declaration visible at the /// top level scope. 
/// /// \param D The externally-produced declaration to push. /// /// \param Name The name of the externally-produced declaration. void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Attribute merging methods. Return true if a new attribute was added. 
//===--- Attribute merging -------------------------------------------------===//
// Each merge*Attr helper reconciles an attribute being added to \p D with any
// matching attribute already present (e.g. from a previous declaration),
// returning the attribute to attach or null if it should be dropped.
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
                                        IdentifierInfo *Platform, bool Implicit,
                                        VersionTuple Introduced,
                                        VersionTuple Deprecated,
                                        VersionTuple Obsoleted,
                                        bool IsUnavailable, StringRef Message,
                                        bool IsStrict, StringRef Replacement,
                                        AvailabilityMergeKind AMK,
                                        unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
                                            TypeVisibilityAttr::VisibilityType Vis,
                                            unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
                                    VisibilityAttr::VisibilityType Vis,
                                    unsigned AttrSpellingListIndex);
UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range,
                        unsigned AttrSpellingListIndex, StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
                                  unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
                                  unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
                       unsigned AttrSpellingListIndex,
                       MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
                            IdentifierInfo *Format, int FormatIdx,
                            int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
                              unsigned AttrSpellingListIndex);
CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name,
                              unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
                                        IdentifierInfo *Ident,
                                        unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
                              unsigned AttrSpellingListIndex);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
                                        unsigned AttrSpellingListIndex);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, SourceRange Range,
                                              IdentifierInfo *Ident,
                                              unsigned AttrSpellingListIndex);
CommonAttr *mergeCommonAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident,
                            unsigned AttrSpellingListIndex);

//===--- Declaration merging -----------------------------------------------===//
// Merge a redeclaration \p New with the prior declaration \p Old: attributes,
// types, exception specifications, etc. Diagnostics are emitted on conflict.
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
                         AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
                          LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
                       bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
                                  Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);

// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
  AA_Assigning,
  AA_Passing,
  AA_Returning,
  AA_Converting,
  AA_Initializing,
  AA_Sending,
  AA_Casting,
  AA_Passing_CFAudited
};

/// C++ Overloading.
enum OverloadKind {
  /// This is a legitimate overload: the existing declarations are
  /// functions or function templates with different signatures.
  Ovl_Overload,

  /// This is not an overload because the signature exactly matches
  /// an existing declaration.
  Ovl_Match,

  /// This is not an overload because the lookup results contain a
  /// non-function.
  Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S, FunctionDecl *New,
                           const LookupResult &OldDecls, NamedDecl *&OldDecl,
                           bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
                bool ConsiderCudaAttrs = true);

/// Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is
/// ignored.
///
/// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);

//===--- Implicit conversion machinery -------------------------------------===//
// Predicates and drivers for the standard/user-defined conversion sequences
// used during overload resolution ([conv], [over.best.ics]).
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
                      bool SuppressUserConversions,
                      bool AllowExplicit,
                      bool InOverloadResolution,
                      bool CStyle,
                      bool AllowObjCWritebackConversion);

bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
                         bool InOverloadResolution,
                         QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
                             QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
                               QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
                              QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
                                const FunctionProtoType *NewType,
                                unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
                                QualType FromType, QualType ToType);

void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
                            CastKind &Kind,
                            CXXCastPath& BasePath,
                            bool IgnoreBaseAccess,
                            bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
                               bool InOverloadResolution,
                               QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
                                  CastKind &Kind,
                                  CXXCastPath &BasePath,
                                  bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
                               bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
                          QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);

ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
                                           const VarDecl *NRVOCandidate,
                                           QualType ResultType,
                                           Expr *Value,
                                           bool AllowNRVO = true);

bool CanPerformCopyInitialization(const InitializedEntity &Entity,
                                  ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
                                     SourceLocation EqualLoc,
                                     ExprResult Init,
                                     bool TopLevelOfInitList = false,
                                     bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
                                               NestedNameSpecifier *Qualifier,
                                               NamedDecl *FoundDecl,
                                               CXXMethodDecl *Method);

/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);

ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);

/// Contexts in which a converted constant expression is required.
enum CCEKind {
  CCEK_CaseValue,   ///< Expression in a case label.
  CCEK_Enumerator,  ///< Enumerator value with fixed underlying type.
  CCEK_TemplateArg, ///< Value of a non-type template parameter.
  CCEK_NewExpr,     ///< Constant expression in a noptr-new-declarator.
  CCEK_ConstexprIf  ///< Condition in a constexpr if statement.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            APValue &Value, CCEKind CCE);

/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
// Diagnostic callback object for PerformContextualImplicitConversion: the
// caller subclasses this to say which destination types are acceptable and
// how to phrase each failure mode.
class ContextualImplicitConverter {
public:
  bool Suppress;           // If true, emit no diagnostics at all.
  bool SuppressConversion; // If true, do not diagnose the chosen conversion.

  ContextualImplicitConverter(bool Suppress = false,
                              bool SuppressConversion = false)
      : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

  /// Determine whether the specified type is a valid destination type
  /// for this conversion.
  virtual bool match(QualType T) = 0;

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                                QualType T) = 0;

  /// Emits a diagnostic when the expression has incomplete class type.
  virtual SemaDiagnosticBuilder
  diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a diagnostic when the only matching conversion function
  /// is explicit.
  virtual SemaDiagnosticBuilder diagnoseExplicitConv(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  /// Emits a note for the explicit conversion function.
  virtual SemaDiagnosticBuilder
  noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// Emits a diagnostic when there are multiple possible conversion
  /// functions.
  virtual SemaDiagnosticBuilder
  diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a note for one of the candidate conversions.
  virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S,
                                              CXXConversionDecl *Conv,
                                              QualType ConvTy) = 0;

  /// Emits a diagnostic when we picked a conversion function
  /// (for cases when we are not allowed to pick a conversion function).
  virtual SemaDiagnosticBuilder diagnoseConversion(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  virtual ~ContextualImplicitConverter() {}
};

// Specialization of the converter for contexts that require an integral
// constant expression (optionally allowing scoped enumerations); subclasses
// only supply the "not an integer" diagnostic.
class ICEConvertDiagnoser : public ContextualImplicitConverter {
  bool AllowScopedEnumerations; // Accept scoped enums in match()?

public:
  ICEConvertDiagnoser(bool AllowScopedEnumerations,
                      bool Suppress, bool SuppressConversion)
      : ContextualImplicitConverter(Suppress, SuppressConversion),
        AllowScopedEnumerations(AllowScopedEnumerations) {}

  /// Match an integral or (possibly scoped) enumeration type.
  bool match(QualType T) override;

  SemaDiagnosticBuilder
  diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
    return diagnoseNotInt(S, Loc, T);
  }

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder
  diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};

/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
    SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

// Kind of collection an Objective-C subscript expression indexes into.
enum ObjCSubscriptKind {
  OS_Array,
  OS_Dictionary,
  OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
  LK_Array,
  LK_Dictionary,
  LK_Numeric,
  LK_Boxed,
  LK_String,
  LK_Block,
  LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);

ExprResult PerformObjectMemberConversion(Expr *From,
                                         NestedNameSpecifier *Qualifier,
                                         NamedDecl *FoundDecl,
                                         NamedDecl *Member);

// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
// Result sets for argument-dependent lookup ([basic.lookup.argdep]):
// the namespaces and classes associated with a call's argument types.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;

//===--- Overload candidate set population ---------------------------------===//
// Each Add*Candidate routine appends viable/non-viable candidates for one
// kind of callee (free function, method, template, conversion, surrogate,
// builtin operator, ADL result) to \p CandidateSet.
void AddOverloadCandidate(FunctionDecl *Function,
                          DeclAccessPair FoundDecl,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet &CandidateSet,
                          bool SuppressUserConversions = false,
                          bool PartialOverloading = false,
                          bool AllowExplicit = false,
                          ConversionSequenceList EarlyConversions = None);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
                           ArrayRef<Expr *> Args,
                           OverloadCandidateSet &CandidateSet,
                           TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                           bool SuppressUserConversions = false,
                           bool PartialOverloading = false,
                           bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
                        QualType ObjectType,
                        Expr::Classification ObjectClassification,
                        ArrayRef<Expr *> Args,
                        OverloadCandidateSet& CandidateSet,
                        bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
                        DeclAccessPair FoundDecl,
                        CXXRecordDecl *ActingContext, QualType ObjectType,
                        Expr::Classification ObjectClassification,
                        ArrayRef<Expr *> Args,
                        OverloadCandidateSet& CandidateSet,
                        bool SuppressUserConversions = false,
                        bool PartialOverloading = false,
                        ConversionSequenceList EarlyConversions = None);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
                                DeclAccessPair FoundDecl,
                                CXXRecordDecl *ActingContext,
                                TemplateArgumentListInfo *ExplicitTemplateArgs,
                                QualType ObjectType,
                                Expr::Classification ObjectClassification,
                                ArrayRef<Expr *> Args,
                                OverloadCandidateSet& CandidateSet,
                                bool SuppressUserConversions = false,
                                bool PartialOverloading = false);
void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
                                  DeclAccessPair FoundDecl,
                                  TemplateArgumentListInfo *ExplicitTemplateArgs,
                                  ArrayRef<Expr *> Args,
                                  OverloadCandidateSet& CandidateSet,
                                  bool SuppressUserConversions = false,
                                  bool PartialOverloading = false);
bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
                                  ArrayRef<QualType> ParamTypes,
                                  ArrayRef<Expr *> Args,
                                  OverloadCandidateSet &CandidateSet,
                                  ConversionSequenceList &Conversions,
                                  bool SuppressUserConversions,
                                  CXXRecordDecl *ActingContext = nullptr,
                                  QualType ObjectType = QualType(),
                                  Expr::Classification
                                      ObjectClassification = {});
void AddConversionCandidate(CXXConversionDecl *Conversion,
                            DeclAccessPair FoundDecl,
                            CXXRecordDecl *ActingContext,
                            Expr *From, QualType ToType,
                            OverloadCandidateSet& CandidateSet,
                            bool AllowObjCConversionOnExplicit,
                            bool AllowResultConversion = true);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
                                    DeclAccessPair FoundDecl,
                                    CXXRecordDecl *ActingContext,
                                    Expr *From, QualType ToType,
                                    OverloadCandidateSet &CandidateSet,
                                    bool AllowObjCConversionOnExplicit,
                                    bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
                           DeclAccessPair FoundDecl,
                           CXXRecordDecl *ActingContext,
                           const FunctionProtoType *Proto,
                           Expr *Object, ArrayRef<Expr *> Args,
                           OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
                                 SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                 OverloadCandidateSet& CandidateSet,
                                 SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
                         OverloadCandidateSet& CandidateSet,
                         bool IsAssignmentOperator = false,
                         unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
                                  SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                  OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
                                          SourceLocation Loc,
                                          ArrayRef<Expr *> Args,
                                          TemplateArgumentListInfo *ExplicitTemplateArgs,
                                          OverloadCandidateSet& CandidateSet,
                                          bool PartialOverloading = false);

// Emit as a 'note' the specific overload candidate.
void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
                           QualType DestType = QualType(),
                           bool TakingAddress = false);

// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
                               bool TakingAddress = false);

/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
                            bool MissingImplicitThis = false);

/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
///
/// \param AllowTopLevelCond Whether to allow the result to be the
/// complete top-level condition.
std::pair<Expr *, std::string>
findFailedBooleanCondition(Expr *Cond, bool AllowTopLevelCond);

/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
                                         const Expr *ThisArg,
                                         ArrayRef<const Expr *> Args,
                                         SourceLocation Loc);

/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
                                           SourceLocation Loc);

/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
                                       bool Complain = false,
                                       SourceLocation Loc = SourceLocation());

//===--- Address-of-overload resolution ------------------------------------===//
// Strip any pointer/reference/member-pointer wrapper from a type so that
// function types can be compared directly:
// [PossiblyAFunctionType]  -->   [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);

FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
                                   QualType TargetType,
                                   bool Complain,
                                   DeclAccessPair &Found,
                                   bool *pHadMultipleCandidates = nullptr);

FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
                                            DeclAccessPair &FoundResult);

bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
    ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);

FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
                                            bool Complain = false,
                                            DeclAccessPair *Found = nullptr);

// NOTE(review): parameter name 'DoFunctionPointerConverion' carries an
// upstream spelling typo; kept as-is since renaming a declaration parameter
// would diverge from the out-of-line definition's documentation.
bool ResolveAndFixSingleFunctionTemplateSpecialization(
    ExprResult &SrcExpr,
    bool DoFunctionPointerConverion = false,
    bool Complain = false,
    SourceRange OpRangeForComplaining = SourceRange(),
    QualType DestTypeForComplaining = QualType(),
    unsigned DiagIDForComplaining = 0);

Expr *FixOverloadedFunctionReference(Expr *E,
                                     DeclAccessPair FoundDecl,
                                     FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
                                          DeclAccessPair FoundDecl,
                                          FunctionDecl *Fn);

void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
                                 ArrayRef<Expr *> Args,
                                 OverloadCandidateSet &CandidateSet,
                                 bool PartialOverloading = false);

// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
  FRS_Success,
  FRS_NoViableFunction,
  FRS_DiagnosticIssued
};

ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
                                         SourceLocation RangeLoc,
                                         const DeclarationNameInfo &NameInfo,
                                         LookupResult &MemberLookup,
                                         OverloadCandidateSet *CandidateSet,
                                         Expr *Range, ExprResult *CallExpr);

//===--- Building overloaded calls and operators ---------------------------===//
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
                                   UnresolvedLookupExpr *ULE,
                                   SourceLocation LParenLoc,
                                   MultiExprArg Args,
                                   SourceLocation RParenLoc,
                                   Expr *ExecConfig,
                                   bool AllowTypoCorrection=true,
                                   bool CalleesAddressIsTaken=false);

bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
                            MultiExprArg Args, SourceLocation RParenLoc,
                            OverloadCandidateSet *CandidateSet,
                            ExprResult *Result);

ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
                                   UnaryOperatorKind Opc,
                                   const UnresolvedSetImpl &Fns,
                                   Expr *input, bool RequiresADL = true);

ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
                                 BinaryOperatorKind Opc,
                                 const UnresolvedSetImpl &Fns,
                                 Expr *LHS, Expr *RHS,
                                 bool RequiresADL = true);

ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
                                              SourceLocation RLoc,
                                              Expr *Base,Expr *Idx);

ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Args,
                                     SourceLocation RParenLoc);
ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object,
                                        SourceLocation LParenLoc,
                                        MultiExprArg Args,
                                        SourceLocation RParenLoc);

ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
                                    SourceLocation OpLoc,
                                    bool *NoArrowOperatorFound = nullptr);

/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
                         CallExpr *CE, FunctionDecl *FD);

/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
                              bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);

/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{

/// Describes the kind of name lookup to perform.
enum LookupNameKind {
  /// Ordinary name lookup, which finds ordinary names (functions,
  /// variables, typedefs, etc.) in C and most kinds of names
  /// (functions, variables, members, types, etc.) in C++.
  LookupOrdinaryName = 0,

  /// Tag name lookup, which finds the names of enums, classes,
  /// structs, and unions.
  LookupTagName,

  /// Label name lookup.
  LookupLabel,

  /// Member name lookup, which finds the names of
  /// class/struct/union members.
  LookupMemberName,

  /// Look up of an operator name (e.g., operator+) for use with
  /// operator overloading. This lookup is similar to ordinary name
  /// lookup, but will ignore any declarations that are class members.
  LookupOperatorName,

  /// Look up of a name that precedes the '::' scope resolution
  /// operator in C++. This lookup completely ignores operator, object,
  /// function, and enumerator names (C++ [basic.lookup.qual]p1).
  LookupNestedNameSpecifierName,

  /// Look up a namespace name within a C++ using directive or
  /// namespace alias definition, ignoring non-namespace names (C++
  /// [basic.lookup.udir]p1).
  LookupNamespaceName,

  /// Look up all declarations in a scope with the given name,
  /// including resolved using declarations. This is appropriate
  /// for checking redeclarations for a using declaration.
  LookupUsingDeclName,

  /// Look up an ordinary name that is going to be redeclared as a
  /// name with linkage. This lookup ignores any declarations that
  /// are outside of the current scope unless they have linkage. See
  /// C99 6.2.2p4-5 and C++ [basic.link]p6.
  LookupRedeclarationWithLinkage,

  /// Look up a friend of a local class. This lookup does not look
  /// outside the innermost non-class scope. See C++11 [class.friend]p11.
  LookupLocalFriendName,

  /// Look up the name of an Objective-C protocol.
  LookupObjCProtocolName,

  /// Look up implicit 'self' parameter of an objective-c method.
  LookupObjCImplicitSelfParam,

  /// Look up the name of an OpenMP user-defined reduction operation.
  LookupOMPReductionName,

  /// Look up any declaration with any name.
  LookupAnyName
};

/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
  /// The lookup is a reference to this name that is not for the
  /// purpose of redeclaring the name.
  NotForRedeclaration = 0,

  /// The lookup results will be used for redeclaration of a name,
  /// if an entity by that name already exists and is visible.
  ForVisibleRedeclaration,

  /// The lookup results will be used for redeclaration of a name
  /// with external linkage; non-visible lookup results with external linkage
  /// may also be found.
  ForExternalRedeclaration
};

/// Select the redeclaration-lookup kind appropriate for CurContext.
RedeclarationKind forRedeclarationInCurContext() {
  // A declaration with an owning module for linkage can never link against
  // anything that is not visible. We don't need to check linkage here; if
  // the context has internal linkage, redeclaration lookup won't find things
  // from other TUs, and we can't safely compute linkage yet in general.
  if (cast<Decl>(CurContext)
          ->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
    return ForVisibleRedeclaration;
  return ForExternalRedeclaration;
}

/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
  /// The lookup resulted in an error.
  LOLR_Error,
  /// The lookup found no match but no diagnostic was issued.
  LOLR_ErrorNoDiagnostic,
  /// The lookup found a single 'cooked' literal operator, which
  /// expects a normal literal to be built and passed to it.
  LOLR_Cooked,
  /// The lookup found a single 'raw' literal operator, which expects
  /// a string literal containing the spelling of the literal token.
  LOLR_Raw,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the characters of the spelling of the literal token to be
  /// passed as a non-type template argument pack.
  LOLR_Template,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the character type and characters of the spelling of the
  /// string literal token to be passed as template arguments.
  LOLR_StringTemplate
};

SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
                                                CXXSpecialMember SM,
                                                bool ConstArg,
                                                bool VolatileArg,
                                                bool RValueThis,
                                                bool ConstThis,
                                                bool VolatileThis);

// Callback types used by delayed typo correction: one to emit the
// diagnostic, one to rebuild the expression once a correction is chosen.
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
    TypoRecoveryCallback;

private:
bool CppLookupName(LookupResult &R, Scope *S);

// Per-TypoExpr bookkeeping: the candidate consumer plus the diagnostic and
// recovery callbacks registered when the TypoExpr was created.
struct TypoExprState {
  std::unique_ptr<TypoCorrectionConsumer> Consumer;
  TypoDiagnosticGenerator DiagHandler;
  TypoRecoveryCallback RecoveryHandler;
  TypoExprState();
  TypoExprState(TypoExprState &&other) noexcept;
  TypoExprState &operator=(TypoExprState &&other) noexcept;
};

/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                            TypoDiagnosticGenerator TDG,
                            TypoRecoveryCallback TRC);

// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;

/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                           Sema::LookupNameKind LookupKind, Scope *S,
                           CXXScopeSpec *SS,
                           std::unique_ptr<CorrectionCandidateCallback> CCC,
                           DeclContext *MemberContext, bool EnteringContext,
                           const ObjCObjectPointerType *OPT,
                           bool ErrorRecovery);

public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;

/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);

/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloaded.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
                            SourceLocation Loc,
                            LookupNameKind NameKind,
                            RedeclarationKind Redecl
                              = NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
                bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                         bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                         CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
                      bool AllowBuiltinCreation = false,
                      bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
                                 RedeclarationKind Redecl
                                   = NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);

void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
                                  QualType T1, QualType T2,
                                  UnresolvedSetImpl &Functions);

LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
                               SourceLocation GnuLabelLoc = SourceLocation());

// Lookup helpers for C++ special member functions of \p Class.
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
                                             unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                       bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
                                            unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class,
                                      unsigned Quals,
                                      bool RValueThis,
                                      unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);

bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult
LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
                      bool AllowRaw, bool AllowTemplate,
                      bool AllowStringTemplate, bool DiagnoseMissing);
bool isKnownName(StringRef name);

void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
                             ArrayRef<Expr *> Args, ADLResult &Functions);

void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
                        VisibleDeclConsumer &Consumer,
                        bool IncludeGlobalScope = true,
                        bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
                        VisibleDeclConsumer &Consumer,
                        bool IncludeGlobalScope = true,
                        bool IncludeDependentBases = false,
                        bool LoadExternal = true);

enum CorrectTypoKind {
  CTK_NonError,     // CorrectTypo used in a non error recovery situation.
  CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};

TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
                           Sema::LookupNameKind LookupKind,
                           Scope *S, CXXScopeSpec *SS,
                           std::unique_ptr<CorrectionCandidateCallback> CCC,
                           CorrectTypoKind Mode,
                           DeclContext *MemberContext = nullptr,
                           bool EnteringContext = false,
                           const ObjCObjectPointerType *OPT = nullptr,
                           bool RecordFailure = true);

TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind, Scope *S,
                             CXXScopeSpec *SS,
                             std::unique_ptr<CorrectionCandidateCallback> CCC,
                             TypoDiagnosticGenerator TDG,
                             TypoRecoveryCallback TRC, CorrectTypoKind Mode,
                             DeclContext *MemberContext = nullptr,
                             bool EnteringContext = false,
                             const ObjCObjectPointerType *OPT = nullptr);

/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
                          llvm::function_ref<ExprResult(Expr *)> Filter =
                              [](Expr *E) -> ExprResult { return E; });

ExprResult
CorrectDelayedTyposInExpr(Expr *E,
                          llvm::function_ref<ExprResult(Expr *)> Filter) {
  return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}

ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
                          llvm::function_ref<ExprResult(Expr *)> Filter =
                              [](Expr *E) -> ExprResult { return E; }) {
  // Invalid results are passed through untouched; typo correction only
  // applies to a successfully-built expression.
  return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}

ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
                          llvm::function_ref<ExprResult(Expr *)> Filter) {
  return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}

void diagnoseTypo(const TypoCorrection &Correction,
                  const PartialDiagnostic &TypoDiag,
                  bool ErrorRecovery = true);

void diagnoseTypo(const TypoCorrection &Correction,
                  const PartialDiagnostic &TypoDiag,
                  const PartialDiagnostic &PrevNote,
                  bool ErrorRecovery = true);

void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);

void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
                                        ArrayRef<Expr *> Args,
                                   AssociatedNamespaceSet &AssociatedNamespaces,
                                   AssociatedClassSet &AssociatedClasses);

void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
                          bool ConsiderLinkage, bool AllowInlineNamespace);

bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);

void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}

ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
                                        SourceLocation IdLoc,
                                        bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
                               Scope *S, bool ForRedeclaration,
                               SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
                                    Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);

// More parsing and symbol table subroutines.

void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
                                 const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
                              bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
                                    const ParsedAttributesView &AttrList);

void checkUnusedDeclAttributes(Declarator &D);

/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);

bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
                          const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
                                    StringRef &Str,
                                    SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
    CXXRecordDecl *RD, SourceRange Range, bool BestCase,
    MSInheritanceAttr::Spelling SemanticSpelling);

void CheckAlignasUnderalignment(Decl *D);

/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType &T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type. /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param nullabilityLoc The location of the nullability specifier. /// /// \param isContextSensitive Whether this nullability specifier was /// written as a context-sensitive keyword (in an Objective-C /// method) or an Objective-C property attribute, rather than as an /// underscored type specifier. /// /// \param allowArrayTypes Whether to accept nullability specifiers on an /// array type (e.g., because it will decay to a pointer). /// /// \returns true if nullability cannot be applied, false otherwise. bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation nullabilityLoc, bool isContextSensitive, bool allowArrayTypes); /// Stmt attributes - this routine is the top level dispatcher. 
StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implelementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. 
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// it property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. 
ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' type match and returns /// true, or false, accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declaraed in interface or /// or protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// category matches with those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. 
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                         bool receiverIdOrClass,
                                         bool instance);

public:
/// - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first, if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool CollectMultipleMethodsInGlobalPool(Selector Sel,
                                        SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                        bool InstanceFirst, bool CheckTheOther,
                                        const ObjCObjectType *TypeBound = nullptr);

bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                    SourceRange R, bool receiverIdOrClass,
                                    SmallVectorImpl<ObjCMethodDecl*>& Methods);

void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                        Selector Sel, SourceRange R,
                                        bool receiverIdOrClass);

private:
/// - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                 bool IsInstance,
                                 SmallVectorImpl<ObjCMethodDecl*>& Methods);

/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  // Remember the failing location in TypoCorrectionFailures unless the
  // caller asked not to record it.
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}

public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool.
/// This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/true);
}

/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/false);
}

const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                                                 QualType ObjectType=QualType());

/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                       SmallVectorImpl<ObjCIvarDecl*> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// A thin wrapper around an Expr* produced by ActOnFinishFullExpr; only
/// Sema::MakeFullExpr may construct a non-empty one.
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }
  FullExprArg(Sema &actions) : E(nullptr) { }
  ExprResult release() { return E; }
  Expr *get() const { return E; }
  Expr *operator->() { return E; }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;
  explicit FullExprArg(Expr *expr) : E(expr) {}
  Expr *E;
};

FullExprArg MakeFullExpr(Expr *Arg) {
  // Default the location to the expression's own location (if any).
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  ExprResult FE =
      ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                          /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}

StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();

StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                         bool HasLeadingEmptyMacro = false);

void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                             ArrayRef<Stmt *> Elts, bool isStmtExpr);

/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }
  ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); }

private:
  Sema &S;
};

/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
  Sema &S;
  bool Active;
  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
  ~FunctionScopeRAII() {
    // Only pop if disable() was not called before destruction.
    if (Active)
      S.PopFunctionScopeInfo();
  }
  void disable() { Active = false; }
};

StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc,
                         SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
                         SourceLocation DotDotDotLoc, ExprResult RHS,
                         SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                            SourceLocation ColonLoc,
                            Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                          SourceLocation ColonLoc, Stmt *SubStmt);

StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
                               ArrayRef<const Attr*> Attrs,
                               Stmt *SubStmt);

class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                       Stmt *InitStmt,
                       ConditionResult Cond, Stmt *ThenVal,
                       SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                       Stmt *InitStmt,
                       ConditionResult Cond, Stmt *ThenVal,
                       SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                  Stmt *InitStmt,
                                  ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
                                 Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
                          Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                       SourceLocation WhileLoc, SourceLocation CondLParen,
                       Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
                        SourceLocation LParenLoc,
                        Stmt *First,
                        ConditionResult Second,
                        FullExprArg Third,
                        SourceLocation RParenLoc,
                        Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
                                         Expr *collection);
StmtResult
ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = 
(CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); 
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. 
void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  // Delegates to the delayed-diagnostics machinery.
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
  return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
  DelayedDiagnostics.popUndelayed(state);
}

void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                                const ObjCInterfaceDecl *UnknownObjCClass,
                                bool ObjCPropertyAccess,
                                bool AvoidPartialAvailabilityChecks = false);

bool makeUnavailableInSystemHeader(SourceLocation loc,
                                   UnavailableAttr::ImplicitReason reason);

/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. 
This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void UpdateMarkingForLValueToRValue(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. 
/// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. 
/// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, std::unique_ptr<CorrectionCandidateCallback> CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, 
AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentType IT); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult 
ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, 
tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const 
CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. 
ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of a the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". 
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. 
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) 
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing if we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; public: /// Lookup the specified comparison category types in the standard /// library, an check the VarDecls possibly returned by the operator<=> /// builtins for that type. /// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. 
  bool isInitListConstructor(const FunctionDecl *Ctor);

  Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                            SourceLocation NamespcLoc, CXXScopeSpec &SS,
                            SourceLocation IdentLoc,
                            IdentifierInfo *NamespcName,
                            const ParsedAttributesView &AttrList);
  void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

  Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc,
                               SourceLocation AliasLoc, IdentifierInfo *Alias,
                               CXXScopeSpec &SS, SourceLocation IdentLoc,
                               IdentifierInfo *Ident);

  void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
  bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
                            const LookupResult &PreviousDecls,
                            UsingShadowDecl *&PrevShadow);
  UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
                                        NamedDecl *Target,
                                        UsingShadowDecl *PrevDecl);

  bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                   bool HasTypenameKeyword,
                                   const CXXScopeSpec &SS,
                                   SourceLocation NameLoc,
                                   const LookupResult &Previous);
  bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
                               bool HasTypename,
                               const CXXScopeSpec &SS,
                               const DeclarationNameInfo &NameInfo,
                               SourceLocation NameLoc);

  NamedDecl *BuildUsingDeclaration(
      Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
      bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
      DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
      const ParsedAttributesView &AttrList, bool IsInstantiation);
  NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                                ArrayRef<NamedDecl *> Expansions);

  bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

  /// Given a derived-class using shadow declaration for a constructor and the
  /// corresponding base class constructor, find or create the implicit
  /// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? 
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. 
    ExceptionSpecificationType getExceptionSpecType() const {
      assert(!isComputedNoexcept(ComputedEST) &&
             "noexcept(expr) should not be a possible result");
      return ComputedEST;
    }

    /// The number of exceptions in the exception specification.
    unsigned size() const { return Exceptions.size(); }

    /// The set of exceptions in the exception specification.
    const QualType *data() const { return Exceptions.data(); }

    /// Integrate another called method into the collected data.
    void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

    /// Integrate an invoked expression into the collected data.
    void CalledExpr(Expr *E);

    /// Overwrite an EPI's exception specification with this
    /// computed exception specification.
    FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
      FunctionProtoType::ExceptionSpecInfo ESI;
      ESI.Type = getExceptionSpecType();
      if (ESI.Type == EST_Dynamic) {
        ESI.Exceptions = Exceptions;
      } else if (ESI.Type == EST_None) {
        /// C++11 [except.spec]p14:
        ///   The exception-specification is noexcept(false) if the set of
        ///   potential exceptions of the special member function contains "any"
        ESI.Type = EST_NoexceptFalse;
        ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
                                                     tok::kw_false).get();
      }
      return ESI;
    }
  };

  /// Determine what sort of exception specification a defaulted
  /// default constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                           CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// copy constructor of a class will have, and whether the parameter
  /// will be const.
  ImplicitExceptionSpecification
  ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// copy assignment operator of a class will have, and whether the
  /// parameter will be const.
ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). 
The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl, CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. 
void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. 
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);

/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);

/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);

/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);

/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

/// Check whether 'this' shows up in the exception specification of a
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). 
ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
                          bool isType, void *TyOrExpr,
                          SourceLocation RParenLoc);

/// Build a __uuidof expression from a type operand.
ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc,
                          TypeSourceInfo *Operand, SourceLocation RParenLoc);

/// Build a __uuidof expression from an expression operand.
ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc,
                          Expr *Operand, SourceLocation RParenLoc);

/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
                          bool isType, void *TyOrExpr,
                          SourceLocation RParenLoc);

/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                            tok::TokenKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                            BinaryOperatorKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                 BinaryOperatorKind Operator);

/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);

/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();

/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;

/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  QualType OldCXXThisTypeOverride;
  bool Enabled;

public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class) along with the qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
                   bool Enabled = true);

  ~CXXThisScopeRAII();
};

/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
    bool BuildAndDiagnose = true,
    const unsigned *const FunctionScopeIndexToStopAt = nullptr,
    bool ByCopy = false);

/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);

/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

/// Build an availability-check expression from the parsed availability
/// specs (presumably Objective-C \@available — confirm against the parser
/// call site).
ExprResult ActOnObjCAvailabilityCheckExpr(
    llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc,
    SourceLocation RParen);

/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);

/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Expr *ArraySize, SourceRange DirectInitRange, Expr *Initializer); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. 
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr) { return ActOnFinishFullExpr(Expr, Expr ? 
Expr->getExprLoc() : SourceLocation()); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue = false, bool IsConstexpr = false, bool IsLambdaInitCaptureInitializer = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. 
SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. 
bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. 
/// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. 
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);

/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);

/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
                                       TypeSourceInfo *Info,
                                       bool KnownDependent,
                                       LambdaCaptureDefault CaptureDefault);

/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
                                     SourceRange IntroducerRange,
                                     TypeSourceInfo *MethodType,
                                     SourceLocation EndLoc,
                                     ArrayRef<ParmVarDecl *> Params,
                                     bool IsConstexprSpecified);

/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
                      CXXMethodDecl *CallOperator,
                      SourceRange IntroducerRange,
                      LambdaCaptureDefault CaptureDefault,
                      SourceLocation CaptureDefaultLoc,
                      bool ExplicitParams,
                      bool ExplicitResultType,
                      bool Mutable);

/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, IdentifierInfo *Id,
    LambdaCaptureInitKind InitKind, Expr *&Init) {
  // Thin parser-facing wrapper: an init-capture is direct-initialized
  // unless it was written with copy-initialization syntax.
  return ParsedType::make(buildLambdaInitCaptureInitialization(
      Loc, ByRef, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init));
}

/// Worker for actOnLambdaInitCaptureInitialization: returns the deduced
/// type of the init-capture; \p DirectInit selects direct- vs
/// copy-initialization, and \p Init may be rewritten with the implicit
/// conversions the analysis performs.
QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef,
                                              IdentifierInfo *Id,
                                              bool DirectInit, Expr *&Init);

/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
                                        QualType InitCaptureType,
                                        IdentifierInfo *Id,
                                        unsigned InitStyle, Expr *Init);

/// Build the implicit field for an init-capture.
FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// Introduce the lambda parameters into scope. void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. 
QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. 
The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const 
IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// 
MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. 
The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annoated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation 
LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD, const FunctionProtoType *T); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
/// Populate \p BasePath with the cast path described by \p Paths.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);

bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
                                  SourceLocation Loc, SourceRange Range,
                                  CXXCastPath *BasePath = nullptr,
                                  bool IgnoreAccess = false);
// Overload taking explicit diagnostic IDs for the inaccessible-base and
// ambiguous-base-conversion cases.  (NOTE(review): "AmbigiousBaseConvID" is
// misspelled, but it is part of the declared interface — renaming it would
// break the out-of-line definition and any named-argument references.)
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
                                  unsigned InaccessibleBaseID,
                                  unsigned AmbigiousBaseConvID,
                                  SourceLocation Loc, SourceRange Range,
                                  DeclarationName Name, CXXCastPath *BasePath,
                                  bool IgnoreAccess = false);

std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);

bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
                                       const CXXMethodDecl *Old);

/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
                                       const CXXMethodDecl *Old);

/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
                                          const CXXMethodDecl *Old);

bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);

/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);

/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);

/// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member
/// function overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
                                            const CXXMethodDecl *Old);

//===--------------------------------------------------------------------===//
// C++ Access Control
//

/// The result of an access-control check.
enum AccessResult {
  AR_accessible,
  AR_inaccessible,
  AR_dependent,
  AR_delayed
};

bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
                              NamedDecl *PrevMemberDecl,
                              AccessSpecifier LexicalAS);

AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
                                         DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
                                         DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
                                   SourceRange PlacementRange,
                                   CXXRecordDecl *NamingClass,
                                   DeclAccessPair FoundDecl,
                                   bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
                                    CXXConstructorDecl *D,
                                    DeclAccessPair FoundDecl,
                                    const InitializedEntity &Entity,
                                    bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
                                    CXXConstructorDecl *D,
                                    DeclAccessPair FoundDecl,
                                    const InitializedEntity &Entity,
                                    const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
                                   CXXDestructorDecl *Dtor,
                                   const PartialDiagnostic &PDiag,
                                   QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
                               CXXRecordDecl *NamingClass,
                               DeclAccessPair Found);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr,
                                       Expr *ArgExpr,
                                       DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
                                        DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
                                  QualType Base, QualType Derived,
                                  const CXXBasePath &Path, unsigned DiagID,
                                  bool ForceCheck = false,
                                  bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
                                          AccessSpecifier access,
                                          QualType objectType);
void HandleDependentAccessCheck(
    const DependentDiagnostic &DD,
    const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(
    const DeclContext *Pattern,
    const MultiLevelTemplateArgumentList &TemplateArgs);

void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);

/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;

/// Selects the kind of entity being diagnosed as having an abstract type.
enum AbstractDiagSelID {
  AbstractNone = -1,
  AbstractReturnType,
  AbstractParamType,
  AbstractVariableType,
  AbstractFieldType,
  AbstractIvarType,
  AbstractSynthesizedIvarType,
  AbstractArrayType
};

bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
                            TypeDiagnoser &Diagnoser);
/// Convenience overload: builds a BoundTypeDiagnoser from \p DiagID and
/// \p Args, then delegates to the TypeDiagnoser overload above.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireNonAbstractType(Loc, T, Diagnoser);
}

void DiagnoseAbstractType(const CXXRecordDecl *RD);

//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//

bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);

//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true);

bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
                        QualType ObjectType, bool EnteringContext,
                        bool &MemberOfUnknownSpecialization,
                        SourceLocation TemplateKWLoc = SourceLocation());

TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS,
                                bool hasTemplateKeyword,
                                const UnqualifiedId &Name,
                                ParsedType ObjectType, bool EnteringContext,
                                TemplateTy &Template,
                                bool &MemberOfUnknownSpecialization);

/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
                          SourceLocation NameLoc,
                          ParsedTemplateTy *Template = nullptr);

bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
                                 SourceLocation IILoc, Scope *S,
                                 const CXXScopeSpec *SS,
                                 TemplateTy &SuggestedTemplate,
                                 TemplateNameKind &SuggestedKind);

bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
                                    NamedDecl *Instantiation,
                                    bool InstantiatedFromMember,
                                    const NamedDecl *Pattern,
                                    const NamedDecl *PatternDef,
                                    TemplateSpecializationKind TSK,
                                    bool Complain = true);

void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);

NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
                              SourceLocation EllipsisLoc,
                              SourceLocation KeyLoc,
                              IdentifierInfo *ParamName,
                              SourceLocation ParamNameLoc,
                              unsigned Depth, unsigned Position,
                              SourceLocation EqualLoc,
                              ParsedType DefaultArg);

QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
                                           SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);

NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
                                         unsigned Depth, unsigned Position,
                                         SourceLocation EqualLoc,
                                         Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc,
                                          TemplateParameterList *Params,
                                          SourceLocation EllipsisLoc,
                                          IdentifierInfo *ParamName,
                                          SourceLocation ParamNameLoc,
                                          unsigned Depth, unsigned Position,
                                          SourceLocation EqualLoc,
                                          ParsedTemplateArgument DefaultArg);

TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc,
                           SourceLocation TemplateLoc,
                           SourceLocation LAngleLoc,
                           ArrayRef<NamedDecl *> Params,
                           SourceLocation RAngleLoc, Expr *RequiresClause);

/// The context in which we are checking a template parameter list.
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec,
                                  SourceLocation TagLoc, CXXScopeSpec &SS,
                                  SourceLocation TemplateKWLoc,
                                  TemplateTy TemplateD,
                                  SourceLocation TemplateLoc,
                                  SourceLocation LAngleLoc,
                                  ASTTemplateArgsPtr TemplateArgsIn,
                                  SourceLocation RAngleLoc);

DeclResult ActOnVarTemplateSpecialization(
    Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc,
    TemplateParameterList *TemplateParams, StorageClass SC,
    bool IsPartialSpecialization);

DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
                              SourceLocation TemplateLoc,
                              SourceLocation TemplateNameLoc,
                              const TemplateArgumentListInfo &TemplateArgs);

ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
                              const DeclarationNameInfo &NameInfo,
                              VarTemplateDecl *Template,
                              SourceLocation TemplateLoc,
                              const TemplateArgumentListInfo *TemplateArgs);

void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);

ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
                               SourceLocation TemplateKWLoc,
                               LookupResult &R, bool RequiresADL,
                               const TemplateArgumentListInfo *TemplateArgs);

ExprResult
BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
                             const DeclarationNameInfo &NameInfo,
                             const TemplateArgumentListInfo *TemplateArgs);

TemplateNameKind ActOnDependentTemplateName(
    Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
    const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
    TemplateTy &Template, bool AllowInjectedClassName = false);

DeclResult ActOnClassTemplateSpecialization(
    Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
    SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
    const ParsedAttributesView &Attr,
    MultiTemplateParamsArg TemplateParameterLists,
    SkipBodyInfo *SkipBody = nullptr);

bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
                                            TemplateDecl *PrimaryTemplate,
                                            unsigned NumExplicitArgs,
                                            ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
    ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
    VarTemplatePartialSpecializationDecl *Partial);

Decl *ActOnTemplateDeclarator(Scope *S,
                              MultiTemplateParamsArg TemplateParameterLists,
                              Declarator &D);

bool CheckSpecializationInstantiationRedecl(
    SourceLocation NewLoc, TemplateSpecializationKind NewTSK,
    NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK,
    SourceLocation PrevPtOfInstantiation, bool &SuppressNew);

bool CheckDependentFunctionTemplateSpecialization(
    FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs,
    LookupResult &Previous);

bool CheckFunctionTemplateSpecialization(
    FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
    LookupResult &Previous);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);

// Explicit instantiation of a class template specialization
// (e.g., "template class X<int>;").
DeclResult ActOnExplicitInstantiation(
    Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
    unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
    TemplateTy Template, SourceLocation TemplateNameLoc,
    SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
    SourceLocation RAngleLoc, const ParsedAttributesView &Attr);

// Explicit instantiation naming a class without template arguments.
DeclResult ActOnExplicitInstantiation(
    Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
    unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS,
    IdentifierInfo *Name, SourceLocation NameLoc,
    const ParsedAttributesView &Attr);

// Explicit instantiation introduced via a declarator (functions/variables).
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
                                      SourceLocation TemplateLoc,
                                      Declarator &D);

TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(
    TemplateDecl *Template, SourceLocation TemplateLoc,
    SourceLocation RAngleLoc, Decl *Param,
    SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg);

/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \returns true if an error occurred, false otherwise. 
bool CheckTemplateArgumentList(TemplateDecl *Template,
                               SourceLocation TemplateLoc,
                               TemplateArgumentListInfo &TemplateArgs,
                               bool PartialTemplateArgs,
                               SmallVectorImpl<TemplateArgument> &Converted,
                               bool UpdateArgsWithConversions = true);

bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
                               TemplateArgumentLoc &Arg,
                               SmallVectorImpl<TemplateArgument> &Converted);

bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
                           TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
                                 QualType InstantiatedParamType, Expr *Arg,
                                 TemplateArgument &Converted,
                                 CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
                                   TemplateArgumentLoc &Arg);

ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
                                        QualType ParamType,
                                        SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
                                            SourceLocation Loc);

/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
  /// We are matching the template parameter lists of two templates
  /// that might be redeclarations.
  ///
  /// \code
  /// template<typename T> struct X;
  /// template<typename T> struct X;
  /// \endcode
  TPL_TemplateMatch,

  /// We are matching the template parameter lists of two template
  /// template parameters as part of matching the template parameter lists
  /// of two templates that might be redeclarations.
  ///
  /// \code
  /// template<template<int I> class TT> struct X;
  /// template<template<int Value> class Other> struct X;
  /// \endcode
  TPL_TemplateTemplateParmMatch,

  /// We are matching the template parameter lists of a template
  /// template argument against the template parameter lists of a template
  /// template parameter.
  ///
  /// \code
  /// template<template<int Value> class Metafun> struct X;
  /// template<int Value> struct integer_c;
  /// X<integer_c> xic;
  /// \endcode
  TPL_TemplateTemplateArgumentMatch
};

bool TemplateParameterListsAreEqual(
    TemplateParameterList *New, TemplateParameterList *Old, bool Complain,
    TemplateParameterListEqualKind Kind,
    SourceLocation TemplateArgLoc = SourceLocation());

bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);

/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
                             const CXXScopeSpec &SS,
                             const IdentifierInfo &II,
                             SourceLocation IdLoc);

/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
                  const CXXScopeSpec &SS, SourceLocation TemplateLoc,
                  TemplateTy TemplateName, IdentifierInfo *TemplateII,
                  SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
                  ASTTemplateArgsPtr TemplateArgs,
                  SourceLocation RAngleLoc);

QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
                           SourceLocation KeywordLoc,
                           NestedNameSpecifierLoc QualifierLoc,
                           const IdentifierInfo &II,
                           SourceLocation IILoc);

TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
                                                  SourceLocation Loc,
                                                  DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);

ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
    TemplateParameterList *Params);

std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                const TemplateArgument *Args,
                                unsigned NumArgs);

//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//

/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();

/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
  /// An arbitrary expression.
  UPPC_Expression = 0,

  /// The base type of a class type.
  UPPC_BaseType,

  /// The type of an arbitrary declaration.
  UPPC_DeclarationType,

  /// The type of a data member.
  UPPC_DataMemberType,

  /// The size of a bit-field.
  UPPC_BitFieldWidth,

  /// The expression in a static assertion.
  UPPC_StaticAssertExpression,

  /// The fixed underlying type of an enumeration.
  UPPC_FixedUnderlyingType,

  /// The enumerator value.
  UPPC_EnumeratorValue,

  /// A using declaration.
  UPPC_UsingDeclaration,

  /// A friend declaration.
  UPPC_FriendDeclaration,

  /// A declaration qualifier.
  UPPC_DeclarationQualifier,

  /// An initializer.
  UPPC_Initializer,

  /// A default argument.
  UPPC_DefaultArgument,

  /// The type of a non-type template parameter.
  UPPC_NonTypeTemplateParameterType,

  /// The type of an exception.
  UPPC_ExceptionType,

  /// Partial specialization.
  UPPC_PartialSpecialization,

  /// Microsoft __if_exists.
  UPPC_IfExists,

  /// Microsoft __if_not_exists.
  UPPC_IfNotExists,

  /// Lambda expression.
  UPPC_Lambda,

  /// Block expression.
  UPPC_Block
};

/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(
    SourceLocation Loc, UnexpandedParameterPackContext UPPC,
    ArrayRef<UnexpandedParameterPack> Unexpanded);

/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
                                     UnexpandedParameterPackContext UPPC);

/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. 
void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. 
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
                                          SourceLocation EllipsisLoc);

/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);

/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
                                   SourceLocation EllipsisLoc,
                                   Optional<unsigned> NumExpansions);

/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange,
                            SourceLocation EllipsisLoc,
                            Optional<unsigned> NumExpansions);

/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);

/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
                              Optional<unsigned> NumExpansions);

/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(
    SourceLocation EllipsisLoc, SourceRange PatternRange,
    ArrayRef<UnexpandedParameterPack> Unexpanded,
    const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand,
    bool &RetainExpansion, Optional<unsigned> &NumExpansions);

/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. 
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
                             bool AdjustExceptionSpec = false);

/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
  /// Template argument deduction was successful.
  TDK_Success = 0,

  /// The declaration was invalid; do nothing.
  TDK_Invalid,

  /// Template argument deduction exceeded the maximum template
  /// instantiation depth (which has already been diagnosed).
  TDK_InstantiationDepth,

  /// Template argument deduction did not deduce a value
  /// for every template parameter.
  TDK_Incomplete,

  /// Template argument deduction did not deduce a value for every
  /// expansion of an expanded template parameter pack.
  TDK_IncompletePack,

  /// Template argument deduction produced inconsistent
  /// deduced values for the given template parameter.
  TDK_Inconsistent,

  /// Template argument deduction failed due to inconsistent
  /// cv-qualifiers on a template parameter type that would
  /// otherwise be deduced, e.g., we tried to deduce T in "const T"
  /// but were given a non-const "X".
  TDK_Underqualified,

  /// Substitution of the deduced template argument values
  /// resulted in an error.
  TDK_SubstitutionFailure,

  /// After substituting deduced template arguments, a dependent
  /// parameter type did not match the corresponding argument.
  TDK_DeducedMismatch,

  /// After substituting deduced template arguments, an element of
  /// a dependent parameter type did not match the corresponding element
  /// of the corresponding argument (when deducing from an initializer list).
  TDK_DeducedMismatchNested,

  /// A non-dependent component of the parameter did not match the
  /// corresponding component of the argument.
  TDK_NonDeducedMismatch,

  /// When performing template argument deduction for a function
  /// template, there were too many call arguments.
  TDK_TooManyArguments,

  /// When performing template argument deduction for a function
  /// template, there were too few call arguments.
  TDK_TooFewArguments,

  /// The explicitly-specified template arguments were not valid
  /// template arguments for the given template.
  TDK_InvalidExplicitArguments,

  /// Checking non-dependent argument conversions failed.
  TDK_NonDependentConversionFailure,

  /// Deduction failed; that's all we know.
  TDK_MiscellaneousDeductionFailure,

  /// CUDA Target attributes do not match.
  TDK_CUDATargetMismatch
};

/// Deduce template arguments for a class template partial
/// specialization against the given list of template arguments.
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

/// Deduce template arguments for a variable template partial
/// specialization against the given list of template arguments.
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

/// Substitute the explicitly-specified template arguments of
/// \p FunctionTemplate, producing the deduced-argument list and the
/// substituted function parameter types.
TemplateDeductionResult SubstituteExplicitTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo &ExplicitTemplateArgs,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
    sema::TemplateDeductionInfo &Info);

/// A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
  OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
                  unsigned ArgIdx, QualType OriginalArgType)
      : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam),
        ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {}

  QualType OriginalParamType; // parameter type before substitution
  bool DecomposedParam;       // whether the parameter was decomposed
  unsigned ArgIdx;            // index of the call argument
  QualType OriginalArgType;   // argument type as written at the call
};

/// Complete function template argument deduction once the deduced
/// arguments have been collected, producing the resulting
/// specialization (or a failure code in \p Info).
TemplateDeductionResult FinishTemplateArgumentDeduction(
    FunctionTemplateDecl *FunctionTemplate,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
    sema::TemplateDeductionInfo &Info,
    SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
    bool PartialOverloading = false,
    llvm::function_ref<bool()> CheckNonDependent = []{ return false; });

/// Deduce template arguments for a function template from a set of
/// call arguments (plus any explicitly-specified template arguments).
TemplateDeductionResult DeduceTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
    FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
    bool PartialOverloading,
    llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);

/// Deduce template arguments for a function template against a
/// target function type (e.g., when taking the address of a function).
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        TemplateArgumentListInfo *ExplicitTemplateArgs,
                        QualType ArgFunctionType,
                        FunctionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info,
                        bool IsAddressOfFunction = false);

/// Deduce template arguments for a conversion function template
/// against the target type of the conversion.
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        QualType ToType,
                        CXXConversionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info);

/// Deduce template arguments for a function template when there is
/// no target type to deduce against (only explicit template arguments).
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        TemplateArgumentListInfo *ExplicitTemplateArgs,
                        FunctionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info,
                        bool IsAddressOfFunction = false);

/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);

/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
                                        QualType Replacement);

/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);

/// Result type of DeduceAutoType.
enum DeduceAutoResult {
  DAR_Succeeded,
  DAR_Failed,
  DAR_FailedAlreadyDiagnosed
};

DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
               Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
               Optional<unsigned> DependentDeductionDepth = None);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
                      bool Diagnose = true);

/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
                                    SourceLocation Loc);

QualType DeduceTemplateSpecializationFromInitializer(
    TypeSourceInfo *TInfo, const InitializedEntity &Entity,
    const InitializationKind &Kind, MultiExprArg Init);

QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
                                      QualType Type, TypeSourceInfo *TSI,
                                      SourceRange Range, bool DirectInit,
                                      Expr *Init);

TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;

bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
                                      SourceLocation ReturnLoc,
                                      Expr *&RetExpr, AutoType *AT);

FunctionTemplateDecl *getMoreSpecializedTemplate(
    FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
    TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
    unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
                   TemplateSpecCandidateSet &FailedCandidates,
                   SourceLocation Loc, const PartialDiagnostic &NoneDiag,
                   const PartialDiagnostic &AmbigDiag,
                   const PartialDiagnostic &CandidateDiag,
                   bool Complain = true, QualType TargetType = QualType());
/// Returns the more specialized of the two given class template
/// partial specializations, or null if neither is more specialized.
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
    ClassTemplatePartialSpecializationDecl *PS1,
    ClassTemplatePartialSpecializationDecl *PS2,
    SourceLocation Loc);

bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
                                  sema::TemplateDeductionInfo &Info);

/// Returns the more specialized of the two given variable template
/// partial specializations, or null if neither is more specialized.
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
    VarTemplatePartialSpecializationDecl *PS1,
    VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);

bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
                                  sema::TemplateDeductionInfo &Info);

bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
    TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);

void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
                                bool OnlyDeduced, unsigned Depth,
                                llvm::SmallBitVector &Used);
// Convenience overload that supplies this Sema's ASTContext.
void MarkDeducedTemplateParameters(
    const FunctionTemplateDecl *FunctionTemplate,
    llvm::SmallBitVector &Deduced) {
  return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(
    ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate,
    llvm::SmallBitVector &Deduced);

//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//

MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
                             const TemplateArgumentList *Innermost = nullptr,
                             bool RelativeToPrimary = false,
                             const FunctionDecl *Pattern = nullptr);

/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
  /// The kind of template instantiation we are performing
  enum SynthesisKind {
    /// We are instantiating a template declaration. The entity is
    /// the declaration we're instantiating (e.g., a CXXRecordDecl).
    TemplateInstantiation,

    /// We are instantiating a default argument for a template
    /// parameter. The Entity is the template parameter whose argument is
    /// being instantiated, the Template is the template, and the
    /// TemplateArgs/NumTemplateArguments provide the template arguments as
    /// specified.
    DefaultTemplateArgumentInstantiation,

    /// We are instantiating a default argument for a function.
    /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
    /// provides the template arguments as specified.
    DefaultFunctionArgumentInstantiation,

    /// We are substituting explicit template arguments provided for
    /// a function template. The entity is a FunctionTemplateDecl.
    ExplicitTemplateArgumentSubstitution,

    /// We are substituting template arguments determined as part of
    /// template argument deduction for either a class template
    /// partial specialization or a function template. The
    /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
    /// a TemplateDecl.
    DeducedTemplateArgumentSubstitution,

    /// We are substituting prior template arguments into a new
    /// template parameter. The template parameter itself is either a
    /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
    PriorTemplateArgumentSubstitution,

    /// We are checking the validity of a default template argument that
    /// has been used when naming a template-id.
    DefaultTemplateArgumentChecking,

    /// We are instantiating the exception specification for a function
    /// template which was deferred until it was needed.
    ExceptionSpecInstantiation,

    /// We are declaring an implicit special member function.
    DeclaringSpecialMember,

    /// We are defining a synthesized function (such as a defaulted special
    /// member).
    DefiningSynthesizedFunction,

    /// Added for Template instantiation observation.
    /// Memoization means we are _not_ instantiating a template because
    /// it is already instantiated (but we entered a context where we
    /// would have had to if it was not already instantiated).
    Memoization
  } Kind;

  /// Was the enclosing context a non-instantiation SFINAE context?
  bool SavedInNonInstantiationSFINAEContext;

  /// The point of instantiation or synthesis within the source code.
  SourceLocation PointOfInstantiation;

  /// The entity that is being synthesized.
  Decl *Entity;

  /// The template (or partial specialization) in which we are
  /// performing the instantiation, for substitutions of prior template
  /// arguments.
  NamedDecl *Template;

  /// The list of template arguments we are substituting, if they
  /// are not part of the entity.
  const TemplateArgument *TemplateArgs;

  // FIXME: Wrap this union around more members, or perhaps store the
  // kind-specific members in the RAII object owning the context.
  union {
    /// The number of template arguments in TemplateArgs.
    unsigned NumTemplateArgs;

    /// The special member being declared or defined.
    CXXSpecialMember SpecialMember;
  };

  ArrayRef<TemplateArgument> template_arguments() const {
    assert(Kind != DeclaringSpecialMember);
    return {TemplateArgs, NumTemplateArgs};
  }

  /// The template deduction info object associated with the
  /// substitution or checking of explicit or deduced template arguments.
  sema::TemplateDeductionInfo *DeductionInfo;

  /// The source range that covers the construct that caused
  /// the instantiation, e.g., the template-id that causes a class
  /// template instantiation.
  SourceRange InstantiationRange;

  CodeSynthesisContext()
      : Kind(TemplateInstantiation), Entity(nullptr), Template(nullptr),
        TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}

  /// Determines whether this template is an actual instantiation
  /// that should be counted toward the maximum instantiation depth.
  bool isInstantiationRecord() const;
};

/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;

/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;

/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;

/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;

/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;

/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();

/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;

/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;

/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;

/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
    TemplateInstCallbacks;

/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;

/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;
  int OldSubstitutionIndex;

public:
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
      : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  ~ArgumentPackSubstitutionIndexRAII() {
    // Restore the index that was in effect before this object was created.
    Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
  }
};

friend class ArgumentPackSubstitutionRAII;

/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
    SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;

/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() returns true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// Note that we are instantiating a class template,
  /// function template, variable template, alias template,
  /// or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());

  // Tag type used to disambiguate the exception-specification constructor.
  struct ExceptionSpecification {};
  /// Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateParameter Param, TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting either explicitly-specified or
  /// deduced template arguments during function template argument deduction.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionTemplateDecl *FunctionTemplate,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        CodeSynthesisContext::SynthesisKind Kind,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template declaration.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ClassTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a variable template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        VarTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument for a function
  /// parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParmVarDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting prior template arguments into a
  /// non-type parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        NonTypeTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are substituting prior template arguments into a
  /// template template parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        TemplateTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are checking the default template argument
  /// against the template parameter for a given template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template, NamedDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we have finished instantiating this template.
  void Clear();

  ~InstantiatingTemplate() { Clear(); }

  /// Determines whether we have exceeded the maximum
  /// recursive template instantiations.
  bool isInvalid() const { return Invalid; }

  /// Determine whether we are already instantiating this
  /// specialization in some surrounding active instantiation.
  bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

private:
  Sema &SemaRef;
  bool Invalid;
  bool AlreadyInstantiating;
  bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                               SourceRange InstantiationRange);

  // Common implementation used by all of the public constructors.
  InstantiatingTemplate(
      Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
      SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
      Decl *Entity, NamedDecl *Template = nullptr,
      ArrayRef<TemplateArgument> TemplateArgs = None,
      sema::TemplateDeductionInfo *DeductionInfo = nullptr);

  InstantiatingTemplate(const InstantiatingTemplate&) = delete;

  InstantiatingTemplate&
  operator=(const InstantiatingTemplate&) = delete;
};

void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();

/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
  // Only contexts beyond the non-instantiation entries count as
  // actual template instantiation.
  return CodeSynthesisContexts.size() > NonInstantiationEntries;
}

void PrintContextStack() {
  // Avoid re-printing the same context stack for consecutive diagnostics.
  if (!CodeSynthesisContexts.empty() &&
      CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
    PrintInstantiationStack();
    LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
  }
  if (PragmaAttributeCurrentTargetDecl)
    PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();

void PrintPragmaAttributeInstantiationPoint();

/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;

/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  return ExprEvalContexts.back().isUnevaluated();
}

/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
  Sema &SemaRef;
  unsigned PrevSFINAEErrors;
  bool PrevInNonInstantiationSFINAEContext;
  bool PrevAccessCheckingSFINAE;
  bool PrevLastDiagnosticIgnored;

public:
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
        PrevInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
        PrevLastDiagnosticIgnored(
            SemaRef.getDiagnostics().isLastDiagnosticIgnored()) {
    // If we are not already in a SFINAE context, mark this as a
    // non-instantiation SFINAE context so errors are trapped here.
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  ~SFINAETrap() {
    // Restore all of the saved SFINAE-related state.
    SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext =
        PrevInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
    SemaRef.getDiagnostics().setLastDiagnosticIgnored(
        PrevLastDiagnosticIgnored);
  }

  /// Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
  }
};

/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  bool PrevDisableTypoCorrection;

public:
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    SemaRef.DisableTypoCorrection = true;
  }
  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
  }
};

/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;

/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation), /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. 
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

/// RAII scope that saves the global pending-instantiation and vtable-use
/// queues on entry and restores them on exit; call perform() to flush the
/// queues accumulated inside the scope.
class GlobalEagerInstantiationScope {
public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;

    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  void perform() {
    if (Enabled) {
      S.DefineUsedVTables();
      S.PerformPendingInstantiations();
    }
  }

  ~GlobalEagerInstantiationScope() {
    if (!Enabled) return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    assert(S.PendingInstantiations.empty() &&
           "PendingInstantiations should be empty before it is discarded.");
    S.PendingInstantiations.swap(SavedPendingInstantiations);
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};

/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

/// RAII scope that saves the local pending-instantiation queue on entry
/// and restores it on exit; call perform() to flush the local queue
/// accumulated inside the scope.
class LocalEagerInstantiationScope {
public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  ~LocalEagerInstantiationScope() {
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
      SavedPendingLocalImplicitInstantiations;
};

/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index.
  /// Indices must be set in strictly increasing order; gaps are filled
  /// with default-constructed infos.
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    Infos.resize(index);
    Infos.push_back(info);

    if (!HasInteresting)
      HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up.
  const FunctionProtoType::ExtParameterInfo *
  getPointerOrNull(unsigned numParams) {
    // If every info is the default, callers can use a null pointer.
    if (!HasInteresting) return nullptr;
    Infos.resize(numParams);
    return Infos.data();
  }
};

void PerformPendingInstantiations(bool LocalOnly = false);

TypeSourceInfo *SubstType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity,
                          bool AllowDeducedTST = false);

QualType SubstType(QualType T,
                   const MultiLevelTemplateArgumentList &TemplateArgs,
                   SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstType(TypeLoc TL,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstFunctionDeclType(
    TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs,
    SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext,
    unsigned ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
                        const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
                        FunctionProtoType::ExceptionSpecInfo &ESI,
                        SmallVectorImpl<QualType> &ExceptionStorage,
                        const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(
    ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs,
    int indexAdjustment, Optional<unsigned> NumExpansions,
    bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
                    const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
                    const MultiLevelTemplateArgumentList &TemplateArgs,
                    SmallVectorImpl<QualType> &ParamTypes,
                    SmallVectorImpl<ParmVarDecl *> *OutParams,
                    ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
                     const MultiLevelTemplateArgumentList &TemplateArgs);

/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
/// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, 
Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl 
*BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const 
*ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation( SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. 
void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build a an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Check the application of the Objective-C '__kindof' qualifier to /// the given type. bool checkObjCKindOfType(QualType &type, SourceLocation loc); /// Ensure attributes are consistent with type. 
/// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. 
The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); 
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). 
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). 
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// a the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. 
void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); /// Called on well-formed '\#pragma clang attribute push'. void ActOnPragmaAttributePush(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". 
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
  return OptimizeOffPragmaLocation;
}

/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);

/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
                    unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
                    unsigned SpellingListIndex, bool IsPackExpansion);

/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
                          unsigned SpellingListIndex);

/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
                       unsigned SpellingListIndex);

/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
                       unsigned SpellingListIndex);

/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
                         Expr *MinBlocks, unsigned SpellingListIndex);

/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name, unsigned SpellingListIndex, bool InInstantiation = false); void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI, unsigned SpellingListIndex); void AddNSConsumedAttr(SourceRange AttrRange, Decl *D, unsigned SpellingListIndex, bool isNSConsumed, bool isTemplateInstantiation); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. 
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. 
// private: void *VarDataSharingAttributesStack; /// Set to true inside '#pragma omp declare target' region. bool IsInOpenMPDeclareTargetContext = false; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); public: /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. 
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D) const; ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. 
  ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                     const DeclarationNameInfo &Id);
  /// Called on well-formed '#pragma omp threadprivate'.
  DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
                                     SourceLocation Loc,
                                     ArrayRef<Expr *> VarList);
  /// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
  OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
                                                  ArrayRef<Expr *> VarList);
  /// Check if the specified type is allowed to be used in 'omp declare
  /// reduction' construct.
  QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
                                           TypeResult ParsedType);
  /// Called on start of '#pragma omp declare reduction'.
  DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
      Scope *S, DeclContext *DC, DeclarationName Name,
      ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
      AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
  /// Initialize declare reduction construct combiner.
  void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
  /// Finish current declare reduction construct combiner.
  void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
  /// Initialize declare reduction construct initializer.
  /// \return omp_priv variable.
  VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
  /// Finish current declare reduction construct initializer.
  void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
                                                 VarDecl *OmpPrivParm);
  /// Called at the end of '#pragma omp declare reduction'.
  DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
      Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
  /// Called on the start of target region i.e. '#pragma omp declare target'.
  bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
  /// Called at the end of target region i.e. '#pragma omp end declare target'.
  void ActOnFinishOpenMPDeclareTargetDirective();
  /// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OMPDeclareTargetDeclAttr::MapTypeTy MT, NamedDeclSetType &SameDirectiveDecls); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return IsInOpenMPDeclareTargetContext; } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return true if (un)supported features for the current target should be /// diagnosed if OpenMP (offloading) is enabled. bool shouldDiagnoseTargetSupportFromOpenMP() const { return !getLangOpts().OpenMPIsDevice || isInOpenMPDeclareTargetContext() || isInOpenMPTargetExecutionDirective(); } /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. 
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. 
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. 
StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. 
  StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp target teams distribute simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Checks correctness of linear modifiers.
  bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
                                 SourceLocation LinLoc);
  /// Checks that the specified declaration matches requirements for the linear
  /// decls.
  bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
                             OpenMPLinearClauseKind LinKind, QualType Type);

  /// Called on well-formed '\#pragma omp declare simd' after parsing of
  /// the associated method/function.
  DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
      DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
      Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
      ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
      ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);

  /// Dispatcher for clauses that carry a single expression argument;
  /// forwards to the clause-specific handler selected by \p Kind.
  OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
  /// Called on well-formed 'if' clause.
  OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
                                 Expr *Condition, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation NameModifierLoc,
                                 SourceLocation ColonLoc,
                                 SourceLocation EndLoc);
  /// Called on well-formed 'final' clause.
  OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
  /// Called on well-formed 'num_threads' clause.
  OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
  /// Called on well-formed 'safelen' clause.
  OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'simdlen' clause.
  OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'collapse' clause.
  OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed 'ordered' clause.
  OMPClause *
  ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
                           SourceLocation LParenLoc = SourceLocation(),
                           Expr *NumForLoops = nullptr);
  /// Called on well-formed 'grainsize' clause.
  OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed 'num_tasks' clause.
  OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed 'hint' clause.
  OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);

  /// Dispatcher for clauses that take a single simple (enumeration-like)
  /// argument, passed as \p Argument; forwards to the handler for \p Kind.
  OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
                                     unsigned Argument,
                                     SourceLocation ArgumentLoc,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'default' clause.
  OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
                                      SourceLocation KindLoc,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'proc_bind' clause.
  OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
                                       SourceLocation KindLoc,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);

  /// Dispatcher for clauses that combine a single expression with one or
  /// more additional simple arguments (e.g. 'schedule'); selected by \p Kind.
  OMPClause *ActOnOpenMPSingleExprWithArgClause(
      OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
      SourceLocation EndLoc);
  /// Called on well-formed 'schedule' clause.
  OMPClause *ActOnOpenMPScheduleClause(
      OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
      OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
      SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);

  /// Dispatcher for clauses that take no argument at all (e.g. 'nowait',
  /// 'untied'); forwards to the handler selected by \p Kind.
  OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
                               SourceLocation EndLoc);
  /// Called on well-formed 'nowait' clause.
  OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'untied' clause.
  OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'mergeable' clause.
  OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed 'read' clause.
  OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
  /// Called on well-formed 'write' clause.
  OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
  /// Called on well-formed 'update' clause.
  OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'capture' clause.
  OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'seq_cst' clause.
  OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'threads' clause.
  OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'simd' clause.
  OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
  /// Called on well-formed 'nogroup' clause.
  OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);

  /// Dispatcher for clauses that carry a list of variables (private,
  /// firstprivate, reduction, map etc.); the extra parameters carry the
  /// clause-specific pieces (reduction id, dependency kind, linear kind,
  /// map type/modifier) and are only meaningful for the matching \p Kind.
  OMPClause *ActOnOpenMPVarListClause(
      OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation ColonLoc, SourceLocation EndLoc,
      CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
      OpenMPLinearClauseKind LinKind, OpenMPMapClauseKind MapTypeModifier,
      OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
      SourceLocation DepLinMapLoc);
  /// Called on well-formed 'private' clause.
  OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'firstprivate' clause.
  OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
                                           SourceLocation StartLoc,
                                           SourceLocation LParenLoc,
                                           SourceLocation EndLoc);
  /// Called on well-formed 'lastprivate' clause.
  OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);
  /// Called on well-formed 'shared' clause.
  OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'reduction' clause.
  OMPClause *ActOnOpenMPReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc,
      SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);
  /// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. 
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(OpenMPMapClauseKind MapTypeModifier, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause *ActOnOpenMPToClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'use_device_ptr' clause. 
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. 
ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. 
void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collector argument expressions for various /// form of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); // UsualArithmeticConversions - performs the UsualUnaryConversions on it's // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. 
IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointers types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatiblePointerSign - The assignment is between two pointers types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. 
IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. 
If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. 
ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. 
void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. 
/// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. 
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before incrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> CUDADeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. 
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> CUDAKnownEmittedFns; /// A partial call graph maintained during CUDA compilation to support /// deferred diagnostics. /// /// Functions are only added here if, at the time they're considered, they are /// not known-emitted. As soon as we discover that a function is /// known-emitted, we remove it and everything it transitively calls from this /// set and add those functions to CUDAKnownEmittedFns. llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>, /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>, SourceLocation>> CUDACallGraph; /// Diagnostic builder for CUDA errors which may or may not be deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class CUDADiagBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). 
K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; CUDADiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); ~CUDADiagBuilder(); /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (CUDADiagBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a CUDADiagBuilder yourself. operator bool() const { return ImmediateDiag.hasValue(); } template <typename T> friend const CUDADiagBuilder &operator<<(const CUDADiagBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiag.hasValue()) *Diag.PartialDiag << Value; return Diag; } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag; llvm::Optional<PartialDiagnostic> PartialDiag; }; /// Creates a CUDADiagBuilder that emits the diagnostic if the current context /// is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. /// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. 
/// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. CUDADiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a CUDADiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. CUDADiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. 
/// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas declared inside __device__ or __global__ functions inherit /// the __device__ attribute. Similarly, lambdas inside __host__ __device__ /// functions become __host__ __device__ themselves. 
void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. 
void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. 
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args); void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteReturn(Scope *S); void CodeCompleteAfterIf(Scope *S); void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void 
CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void 
CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. 
struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool 
CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); 
private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. 
void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. 
IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. 
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; private: class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedDefaultedMemberExceptionSpecs.empty() && "there shouldn't be any pending delayed defaulted member " "exception specs"); assert(S.DelayedDllExportClasses.empty() && "there shouldn't be any pending delayed DLL export classes"); swapSavedState(); } private: Sema &S; decltype(DelayedExceptionSpecChecks) SavedExceptionSpecChecks; decltype(DelayedDefaultedMemberExceptionSpecs) SavedDefaultedMemberExceptionSpecs; decltype(DelayedDllExportClasses) SavedDllExportClasses; void swapSavedState() { SavedExceptionSpecChecks.swap(S.DelayedExceptionSpecChecks); SavedDefaultedMemberExceptionSpecs.swap( S.DelayedDefaultedMemberExceptionSpecs); SavedDllExportClasses.swap(S.DelayedDllExportClasses); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. 
/// Helper record describing one access to a potentially misaligned member
/// (an lvalue expression within a packed record), gathered for delayed
/// diagnostics.
struct MisalignedMember {
  Expr *E;              // The member-access expression itself.
  RecordDecl *RD;       // The record that declares the member.
  ValueDecl *MD;        // The member being accessed.
  CharUnits Alignment;  // The (reduced) alignment of the access.

  MisalignedMember() : E(), RD(), MD(), Alignment() {}
  MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                   CharUnits Alignment)
      : E(E), RD(RD), MD(MD), Alignment(Alignment) {}
  // Key-only form: used to look up an entry by its expression alone.
  explicit MisalignedMember(Expr *E)
      : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

  // Equality compares only the expression, matching the key-only constructor.
  bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;

/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                   CharUnits Alignment);

public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();

/// This function checks if the expression is in the set of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);

/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
    Expr *E,
    llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
        Action);
};

/// RAII object that enters a new expression evaluation context.
/// RAII helper that pushes a new expression evaluation context onto the
/// Sema context stack on construction and pops it on destruction.
class EnterExpressionEvaluationContext {
  Sema &Actions;
  // Tracks whether a context was actually pushed; the destructor only pops
  // when this is true, so conditionally-entered contexts stay balanced.
  bool Entered = true;

public:
  /// Push \p NewContext, optionally skipping the push entirely when
  /// \p ShouldEnter is false.
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }

  /// Push \p NewContext while reusing the lambda context declaration of the
  /// enclosing context (tag-dispatched via Sema::ReuseLambdaContextDecl_t).
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  /// Tag type selecting the braced-init-list constructor below.
  enum InitListTag { InitList };

  /// Conditionally enter an UnevaluatedList context for a braced-init-list.
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    // Pop only if the matching constructor actually pushed a context.
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang

namespace llvm {

// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo =
      DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  // Combines the canonical-FunctionDecl hash with the raw source location
  // encoding so distinct (decl, loc) pairs hash differently.
  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getRawEncoding());
  }

  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};

} // namespace llvm

#endif
feature.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF EEEEE AAA TTTTT U U RRRR EEEEE % % F E A A T U U R R E % % FFF EEE AAAAA T U U RRRR EEE % % F E A A T U U R R E % % F EEEEE A A T UUU R R EEEEE % % % % % % MagickCore Image Feature Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/property.h" #include "magick/animate.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/compress.h" #include "magick/constitute.h" #include "magick/deprecate.h" #include "magick/display.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/feature.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/list.h" #include "magick/image-private.h" #include "magick/magic.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-private.h" #include "magick/profile.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/segment.h" #include "magick/semaphore.h" #include "magick/signature-private.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/timer.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l F e a t u r e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelFeatures() returns features for each channel in the image in % each of four directions (horizontal, vertical, left and right diagonals) % for the specified distance. 
The features include the angular second % moment, contrast, correlation, sum of squares: variance, inverse difference % moment, sum average, sum varience, sum entropy, entropy, difference variance,% difference entropy, information measures of correlation 1, information % measures of correlation 2, and maximum correlation coefficient. You can % access the red channel contrast, for example, like this: % % channel_features=GetImageChannelFeatures(image,1,exception); % contrast=channel_features[RedChannel].contrast[0]; % % Use MagickRelinquishMemory() to free the features buffer. % % The format of the GetImageChannelFeatures method is: % % ChannelFeatures *GetImageChannelFeatures(const Image *image, % const size_t distance,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o distance: the distance. % % o exception: return any errors or warnings in this structure. % */ static inline ssize_t MagickAbsoluteValue(const ssize_t x) { if (x < 0) return(-x); return(x); } MagickExport ChannelFeatures *GetImageChannelFeatures(const Image *image, const size_t distance,ExceptionInfo *exception) { typedef struct _ChannelStatistics { DoublePixelPacket direction[4]; /* horizontal, vertical, left and right diagonals */ } ChannelStatistics; CacheView *image_view; ChannelFeatures *channel_features; ChannelStatistics **cooccurrence, correlation, *density_x, *density_xy, *density_y, entropy_x, entropy_xy, entropy_xy1, entropy_xy2, entropy_y, mean, **Q, *sum, sum_squares, variance; LongPixelPacket gray, *grays; MagickBooleanType status; register ssize_t i; size_t length; ssize_t y, z; unsigned int number_grays; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->columns < (distance+1)) || (image->rows < (distance+1))) return((ChannelFeatures *) NULL); length=CompositeChannels+1UL; 
channel_features=(ChannelFeatures *) AcquireQuantumMemory(length, sizeof(*channel_features)); if (channel_features == (ChannelFeatures *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(channel_features,0,length* sizeof(*channel_features)); /* Form grays. */ grays=(LongPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*grays)); if (grays == (LongPixelPacket *) NULL) { channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } for (i=0; i <= (ssize_t) MaxMap; i++) { grays[i].red=(~0U); grays[i].green=(~0U); grays[i].blue=(~0U); grays[i].opacity=(~0U); grays[i].index=(~0U); } status=MagickTrue; image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { grays[ScaleQuantumToMap(GetRedPixelComponent(p))].red= ScaleQuantumToMap(GetRedPixelComponent(p)); grays[ScaleQuantumToMap(GetGreenPixelComponent(p))].green= ScaleQuantumToMap(GetGreenPixelComponent(p)); grays[ScaleQuantumToMap(GetBluePixelComponent(p))].blue= ScaleQuantumToMap(GetBluePixelComponent(p)); if (image->matte != MagickFalse) grays[ScaleQuantumToMap(GetOpacityPixelComponent(p))].opacity= ScaleQuantumToMap(GetOpacityPixelComponent(p)); if (image->colorspace == CMYKColorspace) grays[ScaleQuantumToMap(GetIndexPixelComponent(indexes+x))].index= 
ScaleQuantumToMap(GetIndexPixelComponent(indexes+x)); p++; } } image_view=DestroyCacheView(image_view); if (status == MagickFalse) { grays=(LongPixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); return(channel_features); } (void) ResetMagickMemory(&gray,0,sizeof(gray)); for (i=0; i <= (ssize_t) MaxMap; i++) { if (grays[i].red != ~0U) grays[gray.red++].red=grays[i].red; if (grays[i].green != ~0U) grays[gray.green++].green=grays[i].green; if (grays[i].blue != ~0U) grays[gray.blue++].blue=grays[i].blue; if (image->matte != MagickFalse) if (grays[i].opacity != ~0U) grays[gray.opacity++].opacity=grays[i].opacity; if (image->colorspace == CMYKColorspace) if (grays[i].index != ~0U) grays[gray.index++].index=grays[i].index; } /* Allocate spatial dependence matrix. */ number_grays=gray.red; if (gray.green > number_grays) number_grays=gray.green; if (gray.blue > number_grays) number_grays=gray.blue; if (image->matte != MagickFalse) if (gray.opacity > number_grays) number_grays=gray.opacity; if (image->colorspace == CMYKColorspace) if (gray.index > number_grays) number_grays=gray.index; cooccurrence=(ChannelStatistics **) AcquireQuantumMemory(number_grays, sizeof(*cooccurrence)); density_x=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_x)); density_xy=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_xy)); density_y=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_y)); Q=(ChannelStatistics **) AcquireQuantumMemory(number_grays,sizeof(*Q)); sum=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(*sum)); if ((cooccurrence == (ChannelStatistics **) NULL) || (density_x == (ChannelStatistics *) NULL) || (density_xy == (ChannelStatistics *) NULL) || (density_y == (ChannelStatistics *) NULL) || (Q == (ChannelStatistics **) NULL) || (sum == (ChannelStatistics *) NULL)) { if (Q != (ChannelStatistics **) NULL) { 
for (i=0; i < (ssize_t) number_grays; i++) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); Q=(ChannelStatistics **) RelinquishMagickMemory(Q); } if (sum != (ChannelStatistics *) NULL) sum=(ChannelStatistics *) RelinquishMagickMemory(sum); if (density_y != (ChannelStatistics *) NULL) density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); if (density_xy != (ChannelStatistics *) NULL) density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); if (density_x != (ChannelStatistics *) NULL) density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); if (cooccurrence != (ChannelStatistics **) NULL) { for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory( cooccurrence); } grays=(LongPixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } (void) ResetMagickMemory(&correlation,0,sizeof(correlation)); (void) ResetMagickMemory(density_x,0,2*(number_grays+1)*sizeof(*density_x)); (void) ResetMagickMemory(density_xy,0,2*(number_grays+1)*sizeof(*density_xy)); (void) ResetMagickMemory(density_y,0,2*(number_grays+1)*sizeof(*density_y)); (void) ResetMagickMemory(&mean,0,sizeof(mean)); (void) ResetMagickMemory(sum,0,number_grays*sizeof(*sum)); (void) ResetMagickMemory(&sum_squares,0,sizeof(sum_squares)); (void) ResetMagickMemory(density_xy,0,2*number_grays*sizeof(*density_xy)); (void) ResetMagickMemory(&entropy_x,0,sizeof(entropy_x)); (void) ResetMagickMemory(&entropy_xy,0,sizeof(entropy_xy)); (void) ResetMagickMemory(&entropy_xy1,0,sizeof(entropy_xy1)); (void) ResetMagickMemory(&entropy_xy2,0,sizeof(entropy_xy2)); (void) ResetMagickMemory(&entropy_y,0,sizeof(entropy_y)); (void) 
ResetMagickMemory(&variance,0,sizeof(variance)); for (i=0; i < (ssize_t) number_grays; i++) { cooccurrence[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays, sizeof(**cooccurrence)); Q[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(**Q)); if ((cooccurrence[i] == (ChannelStatistics *) NULL) || (Q[i] == (ChannelStatistics *) NULL)) break; (void) ResetMagickMemory(cooccurrence[i],0,number_grays* sizeof(**cooccurrence)); (void) ResetMagickMemory(Q[i],0,number_grays*sizeof(**Q)); } if (i < (ssize_t) number_grays) { for (i--; i >= 0; i--) { if (Q[i] != (ChannelStatistics *) NULL) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); if (cooccurrence[i] != (ChannelStatistics *) NULL) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); } Q=(ChannelStatistics **) RelinquishMagickMemory(Q); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); sum=(ChannelStatistics *) RelinquishMagickMemory(sum); density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); grays=(LongPixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } /* Initialize spatial dependence matrix. 
*/ status=MagickTrue; image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; ssize_t offset, u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-(ssize_t) distance,y,image->columns+ 2*distance,distance+2,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); p+=distance; indexes+=distance; for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < 4; i++) { switch (i) { case 0: default: { /* Horizontal adjacency. */ offset=(ssize_t) distance; break; } case 1: { /* Vertical adjacency. */ offset=(ssize_t) (image->columns+2*distance); break; } case 2: { /* Right diagonal adjacency. */ offset=(ssize_t) ((image->columns+2*distance)-distance); break; } case 3: { /* Left diagonal adjacency. 
*/ offset=(ssize_t) ((image->columns+2*distance)+distance); break; } } u=0; v=0; while (grays[u].red != ScaleQuantumToMap(GetRedPixelComponent(p))) u++; while (grays[v].red != ScaleQuantumToMap(GetRedPixelComponent(p+offset))) v++; cooccurrence[u][v].direction[i].red++; cooccurrence[v][u].direction[i].red++; u=0; v=0; while (grays[u].green != ScaleQuantumToMap(GetGreenPixelComponent(p))) u++; while (grays[v].green != ScaleQuantumToMap(GetGreenPixelComponent(p+offset))) v++; cooccurrence[u][v].direction[i].green++; cooccurrence[v][u].direction[i].green++; u=0; v=0; while (grays[u].blue != ScaleQuantumToMap(GetBluePixelComponent(p))) u++; while (grays[v].blue != ScaleQuantumToMap((p+offset)->blue)) v++; cooccurrence[u][v].direction[i].blue++; cooccurrence[v][u].direction[i].blue++; if (image->matte != MagickFalse) { u=0; v=0; while (grays[u].opacity != ScaleQuantumToMap(GetOpacityPixelComponent(p))) u++; while (grays[v].opacity != ScaleQuantumToMap((p+offset)->opacity)) v++; cooccurrence[u][v].direction[i].opacity++; cooccurrence[v][u].direction[i].opacity++; } if (image->colorspace == CMYKColorspace) { u=0; v=0; while (grays[u].index != ScaleQuantumToMap(GetIndexPixelComponent(indexes+x))) u++; while (grays[v].index != ScaleQuantumToMap(GetIndexPixelComponent(indexes+x+offset))) v++; cooccurrence[u][v].direction[i].index++; cooccurrence[v][u].direction[i].index++; } } p++; } } grays=(LongPixelPacket *) RelinquishMagickMemory(grays); image_view=DestroyCacheView(image_view); if (status == MagickFalse) { for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } /* Normalize spatial dependence matrix. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (i=0; i < 4; i++) { double normalize; switch (i) { case 0: default: { /* Horizontal adjacency. */ normalize=2.0*image->rows*(image->columns-distance); break; } case 1: { /* Vertical adjacency. */ normalize=2.0*(image->rows-distance)*image->columns; break; } case 2: { /* Right diagonal adjacency. */ normalize=2.0*(image->rows-distance)*(image->columns-distance); break; } case 3: { /* Left diagonal adjacency. */ normalize=2.0*(image->rows-distance)*(image->columns-distance); break; } } for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { cooccurrence[x][y].direction[i].red/=normalize; cooccurrence[x][y].direction[i].green/=normalize; cooccurrence[x][y].direction[i].blue/=normalize; if (image->matte != MagickFalse) cooccurrence[x][y].direction[i].opacity/=normalize; if (image->colorspace == CMYKColorspace) cooccurrence[x][y].direction[i].index/=normalize; } } } /* Compute texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (i=0; i < 4; i++) { register ssize_t y; for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Angular second moment: measure of homogeneity of the image. 
*/ channel_features[RedChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].red* cooccurrence[x][y].direction[i].red; channel_features[GreenChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].green* cooccurrence[x][y].direction[i].green; channel_features[BlueChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].blue* cooccurrence[x][y].direction[i].blue; if (image->matte != MagickFalse) channel_features[OpacityChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].opacity* cooccurrence[x][y].direction[i].opacity; if (image->colorspace == CMYKColorspace) channel_features[BlackChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].index* cooccurrence[x][y].direction[i].index; /* Correlation: measure of linear-dependencies in the image. */ sum[y].direction[i].red+=cooccurrence[x][y].direction[i].red; sum[y].direction[i].green+=cooccurrence[x][y].direction[i].green; sum[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->matte != MagickFalse) sum[y].direction[i].opacity+=cooccurrence[x][y].direction[i].opacity; if (image->colorspace == CMYKColorspace) sum[y].direction[i].index+=cooccurrence[x][y].direction[i].index; correlation.direction[i].red+=x*y*cooccurrence[x][y].direction[i].red; correlation.direction[i].green+=x*y* cooccurrence[x][y].direction[i].green; correlation.direction[i].blue+=x*y* cooccurrence[x][y].direction[i].blue; if (image->matte != MagickFalse) correlation.direction[i].opacity+=x*y* cooccurrence[x][y].direction[i].opacity; if (image->colorspace == CMYKColorspace) correlation.direction[i].index+=x*y* cooccurrence[x][y].direction[i].index; /* Inverse Difference Moment. 
*/ channel_features[RedChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].red/((y-x)*(y-x)+1); channel_features[GreenChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].green/((y-x)*(y-x)+1); channel_features[BlueChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].blue/((y-x)*(y-x)+1); if (image->matte != MagickFalse) channel_features[OpacityChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].opacity/((y-x)*(y-x)+1); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].index/((y-x)*(y-x)+1); /* Sum average. */ density_xy[y+x+2].direction[i].red+= cooccurrence[x][y].direction[i].red; density_xy[y+x+2].direction[i].green+= cooccurrence[x][y].direction[i].green; density_xy[y+x+2].direction[i].blue+= cooccurrence[x][y].direction[i].blue; if (image->matte != MagickFalse) density_xy[y+x+2].direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; if (image->colorspace == CMYKColorspace) density_xy[y+x+2].direction[i].index+= cooccurrence[x][y].direction[i].index; /* Entropy. */ channel_features[RedChannel].entropy[i]-= cooccurrence[x][y].direction[i].red* log10(cooccurrence[x][y].direction[i].red+MagickEpsilon); channel_features[GreenChannel].entropy[i]-= cooccurrence[x][y].direction[i].green* log10(cooccurrence[x][y].direction[i].green+MagickEpsilon); channel_features[BlueChannel].entropy[i]-= cooccurrence[x][y].direction[i].blue* log10(cooccurrence[x][y].direction[i].blue+MagickEpsilon); if (image->matte != MagickFalse) channel_features[OpacityChannel].entropy[i]-= cooccurrence[x][y].direction[i].opacity* log10(cooccurrence[x][y].direction[i].opacity+MagickEpsilon); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].entropy[i]-= cooccurrence[x][y].direction[i].index* log10(cooccurrence[x][y].direction[i].index+MagickEpsilon); /* Information Measures of Correlation. 
*/ density_x[x].direction[i].red+=cooccurrence[x][y].direction[i].red; density_x[x].direction[i].green+=cooccurrence[x][y].direction[i].green; density_x[x].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->matte != MagickFalse) density_x[x].direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; if (image->colorspace == CMYKColorspace) density_x[x].direction[i].index+= cooccurrence[x][y].direction[i].index; density_y[y].direction[i].red+=cooccurrence[x][y].direction[i].red; density_y[y].direction[i].green+=cooccurrence[x][y].direction[i].green; density_y[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->matte != MagickFalse) density_y[y].direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; if (image->colorspace == CMYKColorspace) density_y[y].direction[i].index+= cooccurrence[x][y].direction[i].index; } mean.direction[i].red+=y*sum[y].direction[i].red; sum_squares.direction[i].red+=y*y*sum[y].direction[i].red; mean.direction[i].green+=y*sum[y].direction[i].green; sum_squares.direction[i].green+=y*y*sum[y].direction[i].green; mean.direction[i].blue+=y*sum[y].direction[i].blue; sum_squares.direction[i].blue+=y*y*sum[y].direction[i].blue; if (image->matte != MagickFalse) { mean.direction[i].opacity+=y*sum[y].direction[i].opacity; sum_squares.direction[i].opacity+=y*y*sum[y].direction[i].opacity; } if (image->colorspace == CMYKColorspace) { mean.direction[i].index+=y*sum[y].direction[i].index; sum_squares.direction[i].index+=y*y*sum[y].direction[i].index; } } /* Correlation: measure of linear-dependencies in the image. 
*/ channel_features[RedChannel].correlation[i]= (correlation.direction[i].red-mean.direction[i].red* mean.direction[i].red)/(sqrt(sum_squares.direction[i].red- (mean.direction[i].red*mean.direction[i].red))*sqrt( sum_squares.direction[i].red-(mean.direction[i].red* mean.direction[i].red))); channel_features[GreenChannel].correlation[i]= (correlation.direction[i].green-mean.direction[i].green* mean.direction[i].green)/(sqrt(sum_squares.direction[i].green- (mean.direction[i].green*mean.direction[i].green))*sqrt( sum_squares.direction[i].green-(mean.direction[i].green* mean.direction[i].green))); channel_features[BlueChannel].correlation[i]= (correlation.direction[i].blue-mean.direction[i].blue* mean.direction[i].blue)/(sqrt(sum_squares.direction[i].blue- (mean.direction[i].blue*mean.direction[i].blue))*sqrt( sum_squares.direction[i].blue-(mean.direction[i].blue* mean.direction[i].blue))); if (image->matte != MagickFalse) channel_features[OpacityChannel].correlation[i]= (correlation.direction[i].opacity-mean.direction[i].opacity* mean.direction[i].opacity)/(sqrt(sum_squares.direction[i].opacity- (mean.direction[i].opacity*mean.direction[i].opacity))*sqrt( sum_squares.direction[i].opacity-(mean.direction[i].opacity* mean.direction[i].opacity))); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].correlation[i]= (correlation.direction[i].index-mean.direction[i].index* mean.direction[i].index)/(sqrt(sum_squares.direction[i].index- (mean.direction[i].index*mean.direction[i].index))*sqrt( sum_squares.direction[i].index-(mean.direction[i].index* mean.direction[i].index))); } /* Compute more texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (i=0; i < 4; i++) { register ssize_t x; for (x=2; x < (ssize_t) (2*number_grays); x++) { /* Sum average. 
*/ channel_features[RedChannel].sum_average[i]+= x*density_xy[x].direction[i].red; channel_features[GreenChannel].sum_average[i]+= x*density_xy[x].direction[i].green; channel_features[BlueChannel].sum_average[i]+= x*density_xy[x].direction[i].blue; if (image->matte != MagickFalse) channel_features[OpacityChannel].sum_average[i]+= x*density_xy[x].direction[i].opacity; if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].sum_average[i]+= x*density_xy[x].direction[i].index; /* Sum entropy. */ channel_features[RedChannel].sum_entropy[i]-= density_xy[x].direction[i].red* log10(density_xy[x].direction[i].red+MagickEpsilon); channel_features[GreenChannel].sum_entropy[i]-= density_xy[x].direction[i].green* log10(density_xy[x].direction[i].green+MagickEpsilon); channel_features[BlueChannel].sum_entropy[i]-= density_xy[x].direction[i].blue* log10(density_xy[x].direction[i].blue+MagickEpsilon); if (image->matte != MagickFalse) channel_features[OpacityChannel].sum_entropy[i]-= density_xy[x].direction[i].opacity* log10(density_xy[x].direction[i].opacity+MagickEpsilon); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].sum_entropy[i]-= density_xy[x].direction[i].index* log10(density_xy[x].direction[i].index+MagickEpsilon); /* Sum variance. 
*/ channel_features[RedChannel].sum_variance[i]+= (x-channel_features[RedChannel].sum_entropy[i])* (x-channel_features[RedChannel].sum_entropy[i])* density_xy[x].direction[i].red; channel_features[GreenChannel].sum_variance[i]+= (x-channel_features[GreenChannel].sum_entropy[i])* (x-channel_features[GreenChannel].sum_entropy[i])* density_xy[x].direction[i].green; channel_features[BlueChannel].sum_variance[i]+= (x-channel_features[BlueChannel].sum_entropy[i])* (x-channel_features[BlueChannel].sum_entropy[i])* density_xy[x].direction[i].blue; if (image->matte != MagickFalse) channel_features[OpacityChannel].sum_variance[i]+= (x-channel_features[OpacityChannel].sum_entropy[i])* (x-channel_features[OpacityChannel].sum_entropy[i])* density_xy[x].direction[i].opacity; if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].sum_variance[i]+= (x-channel_features[IndexChannel].sum_entropy[i])* (x-channel_features[IndexChannel].sum_entropy[i])* density_xy[x].direction[i].index; } } /* Compute more texture features. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (i=0; i < 4; i++) { register ssize_t y; for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Sum of Squares: Variance */ variance.direction[i].red+=(y-mean.direction[i].red+1)* (y-mean.direction[i].red+1)*cooccurrence[x][y].direction[i].red; variance.direction[i].green+=(y-mean.direction[i].green+1)* (y-mean.direction[i].green+1)*cooccurrence[x][y].direction[i].green; variance.direction[i].blue+=(y-mean.direction[i].blue+1)* (y-mean.direction[i].blue+1)*cooccurrence[x][y].direction[i].blue; if (image->matte != MagickFalse) variance.direction[i].opacity+=(y-mean.direction[i].opacity+1)* (y-mean.direction[i].opacity+1)* cooccurrence[x][y].direction[i].opacity; if (image->colorspace == CMYKColorspace) variance.direction[i].index+=(y-mean.direction[i].index+1)* (y-mean.direction[i].index+1)*cooccurrence[x][y].direction[i].index; /* Sum average / Difference Variance. */ density_xy[MagickAbsoluteValue(y-x)].direction[i].red+= cooccurrence[x][y].direction[i].red; density_xy[MagickAbsoluteValue(y-x)].direction[i].green+= cooccurrence[x][y].direction[i].green; density_xy[MagickAbsoluteValue(y-x)].direction[i].blue+= cooccurrence[x][y].direction[i].blue; if (image->matte != MagickFalse) density_xy[MagickAbsoluteValue(y-x)].direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; if (image->colorspace == CMYKColorspace) density_xy[MagickAbsoluteValue(y-x)].direction[i].index+= cooccurrence[x][y].direction[i].index; /* Information Measures of Correlation. 
*/ entropy_xy.direction[i].red-=cooccurrence[x][y].direction[i].red* log10(cooccurrence[x][y].direction[i].red+MagickEpsilon); entropy_xy.direction[i].green-=cooccurrence[x][y].direction[i].green* log10(cooccurrence[x][y].direction[i].green+MagickEpsilon); entropy_xy.direction[i].blue-=cooccurrence[x][y].direction[i].blue* log10(cooccurrence[x][y].direction[i].blue+MagickEpsilon); if (image->matte != MagickFalse) entropy_xy.direction[i].opacity-= cooccurrence[x][y].direction[i].opacity*log10( cooccurrence[x][y].direction[i].opacity+MagickEpsilon); if (image->colorspace == CMYKColorspace) entropy_xy.direction[i].index-=cooccurrence[x][y].direction[i].index* log10(cooccurrence[x][y].direction[i].index+MagickEpsilon); entropy_xy1.direction[i].red-=(cooccurrence[x][y].direction[i].red* log10(density_x[x].direction[i].red*density_y[y].direction[i].red+ MagickEpsilon)); entropy_xy1.direction[i].green-=(cooccurrence[x][y].direction[i].green* log10(density_x[x].direction[i].green*density_y[y].direction[i].green+ MagickEpsilon)); entropy_xy1.direction[i].blue-=(cooccurrence[x][y].direction[i].blue* log10(density_x[x].direction[i].blue*density_y[y].direction[i].blue+ MagickEpsilon)); if (image->matte != MagickFalse) entropy_xy1.direction[i].opacity-=( cooccurrence[x][y].direction[i].opacity*log10( density_x[x].direction[i].opacity*density_y[y].direction[i].opacity+ MagickEpsilon)); if (image->colorspace == CMYKColorspace) entropy_xy1.direction[i].index-=( cooccurrence[x][y].direction[i].index*log10( density_x[x].direction[i].index*density_y[y].direction[i].index+ MagickEpsilon)); entropy_xy2.direction[i].red-=(density_x[x].direction[i].red* density_y[y].direction[i].red*log10(density_x[x].direction[i].red* density_y[y].direction[i].red+MagickEpsilon)); entropy_xy2.direction[i].green-=(density_x[x].direction[i].green* density_y[y].direction[i].green*log10(density_x[x].direction[i].green* density_y[y].direction[i].green+MagickEpsilon)); 
entropy_xy2.direction[i].blue-=(density_x[x].direction[i].blue* density_y[y].direction[i].blue*log10(density_x[x].direction[i].blue* density_y[y].direction[i].blue+MagickEpsilon)); if (image->matte != MagickFalse) entropy_xy2.direction[i].opacity-=(density_x[x].direction[i].opacity* density_y[y].direction[i].opacity*log10( density_x[x].direction[i].opacity*density_y[y].direction[i].opacity+ MagickEpsilon)); if (image->colorspace == CMYKColorspace) entropy_xy2.direction[i].index-=(density_x[x].direction[i].index* density_y[y].direction[i].index*log10( density_x[x].direction[i].index*density_y[y].direction[i].index+ MagickEpsilon)); } } channel_features[RedChannel].variance_sum_of_squares[i]= variance.direction[i].red; channel_features[GreenChannel].variance_sum_of_squares[i]= variance.direction[i].green; channel_features[BlueChannel].variance_sum_of_squares[i]= variance.direction[i].blue; if (image->matte != MagickFalse) channel_features[RedChannel].variance_sum_of_squares[i]= variance.direction[i].opacity; if (image->colorspace == CMYKColorspace) channel_features[RedChannel].variance_sum_of_squares[i]= variance.direction[i].index; } /* Compute more texture features. */ (void) ResetMagickMemory(&variance,0,sizeof(variance)); (void) ResetMagickMemory(&sum_squares,0,sizeof(sum_squares)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (i=0; i < 4; i++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Difference variance. 
*/ variance.direction[i].red+=density_xy[x].direction[i].red; variance.direction[i].green+=density_xy[x].direction[i].green; variance.direction[i].blue+=density_xy[x].direction[i].blue; if (image->matte != MagickFalse) variance.direction[i].opacity+=density_xy[x].direction[i].opacity; if (image->colorspace == CMYKColorspace) variance.direction[i].index+=density_xy[x].direction[i].index; sum_squares.direction[i].red+=density_xy[x].direction[i].red* density_xy[x].direction[i].red; sum_squares.direction[i].green+=density_xy[x].direction[i].green* density_xy[x].direction[i].green; sum_squares.direction[i].blue+=density_xy[x].direction[i].blue* density_xy[x].direction[i].blue; if (image->matte != MagickFalse) sum_squares.direction[i].opacity+=density_xy[x].direction[i].opacity* density_xy[x].direction[i].opacity; if (image->colorspace == CMYKColorspace) sum_squares.direction[i].index+=density_xy[x].direction[i].index* density_xy[x].direction[i].index; /* Difference entropy. */ channel_features[RedChannel].difference_entropy[i]-= density_xy[x].direction[i].red* log10(density_xy[x].direction[i].red+MagickEpsilon); channel_features[GreenChannel].difference_entropy[i]-= density_xy[x].direction[i].green* log10(density_xy[x].direction[i].green+MagickEpsilon); channel_features[BlueChannel].difference_entropy[i]-= density_xy[x].direction[i].blue* log10(density_xy[x].direction[i].blue+MagickEpsilon); if (image->matte != MagickFalse) channel_features[OpacityChannel].difference_entropy[i]-= density_xy[x].direction[i].opacity* log10(density_xy[x].direction[i].opacity+MagickEpsilon); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].difference_entropy[i]-= density_xy[x].direction[i].index* log10(density_xy[x].direction[i].index+MagickEpsilon); /* Information Measures of Correlation. 
*/ entropy_x.direction[i].red-=(density_x[x].direction[i].red* log10(density_x[x].direction[i].red+MagickEpsilon)); entropy_x.direction[i].green-=(density_x[x].direction[i].green* log10(density_x[x].direction[i].green+MagickEpsilon)); entropy_x.direction[i].blue-=(density_x[x].direction[i].blue* log10(density_x[x].direction[i].blue+MagickEpsilon)); if (image->matte != MagickFalse) entropy_x.direction[i].opacity-=(density_x[x].direction[i].opacity* log10(density_x[x].direction[i].opacity+MagickEpsilon)); if (image->colorspace == CMYKColorspace) entropy_x.direction[i].index-=(density_x[x].direction[i].index* log10(density_x[x].direction[i].index+MagickEpsilon)); entropy_y.direction[i].red-=(density_y[y].direction[i].red* log10(density_y[y].direction[i].red+MagickEpsilon)); entropy_y.direction[i].green-=(density_y[y].direction[i].green* log10(density_y[y].direction[i].green+MagickEpsilon)); entropy_y.direction[i].blue-=(density_y[y].direction[i].blue* log10(density_y[y].direction[i].blue+MagickEpsilon)); if (image->matte != MagickFalse) entropy_y.direction[i].opacity-=(density_y[y].direction[i].opacity* log10(density_y[y].direction[i].opacity+MagickEpsilon)); if (image->colorspace == CMYKColorspace) entropy_y.direction[i].index-=(density_y[y].direction[i].index* log10(density_y[y].direction[i].index+MagickEpsilon)); } /* Difference variance. 
*/ channel_features[RedChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].red)- (variance.direction[i].red*variance.direction[i].red))/ ((double) number_grays*number_grays*number_grays*number_grays); channel_features[GreenChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].green)- (variance.direction[i].green*variance.direction[i].green))/ ((double) number_grays*number_grays*number_grays*number_grays); channel_features[BlueChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].blue)- (variance.direction[i].blue*variance.direction[i].blue))/ ((double) number_grays*number_grays*number_grays*number_grays); if (image->matte != MagickFalse) channel_features[OpacityChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].opacity)- (variance.direction[i].opacity*variance.direction[i].opacity))/ ((double) number_grays*number_grays*number_grays*number_grays); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].index)- (variance.direction[i].index*variance.direction[i].index))/ ((double) number_grays*number_grays*number_grays*number_grays); /* Information Measures of Correlation. */ channel_features[RedChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].red-entropy_xy1.direction[i].red)/ (entropy_x.direction[i].red > entropy_y.direction[i].red ? entropy_x.direction[i].red : entropy_y.direction[i].red); channel_features[GreenChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].green-entropy_xy1.direction[i].green)/ (entropy_x.direction[i].green > entropy_y.direction[i].green ? 
entropy_x.direction[i].green : entropy_y.direction[i].green); channel_features[BlueChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].blue-entropy_xy1.direction[i].blue)/ (entropy_x.direction[i].blue > entropy_y.direction[i].blue ? entropy_x.direction[i].blue : entropy_y.direction[i].blue); if (image->matte != MagickFalse) channel_features[OpacityChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].opacity-entropy_xy1.direction[i].opacity)/ (entropy_x.direction[i].opacity > entropy_y.direction[i].opacity ? entropy_x.direction[i].opacity : entropy_y.direction[i].opacity); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].index-entropy_xy1.direction[i].index)/ (entropy_x.direction[i].index > entropy_y.direction[i].index ? entropy_x.direction[i].index : entropy_y.direction[i].index); channel_features[RedChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].red- entropy_xy.direction[i].red))))); channel_features[GreenChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].green- entropy_xy.direction[i].green))))); channel_features[BlueChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].blue- entropy_xy.direction[i].blue))))); if (image->matte != MagickFalse) channel_features[OpacityChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].opacity- entropy_xy.direction[i].opacity))))); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].index- entropy_xy.direction[i].index))))); } /* Compute more texture features. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (i=0; i < 4; i++) { for (z=0; z < (ssize_t) number_grays; z++) { register ssize_t y; ChannelStatistics pixel; (void) ResetMagickMemory(&pixel,0,sizeof(pixel)); for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Contrast: amount of local variations present in an image. */ if (((y-x) == z) || ((x-y) == z)) { pixel.direction[i].red+=cooccurrence[x][y].direction[i].red; pixel.direction[i].green+=cooccurrence[x][y].direction[i].green; pixel.direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->matte != MagickFalse) pixel.direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; if (image->colorspace == CMYKColorspace) pixel.direction[i].index+=cooccurrence[x][y].direction[i].index; } /* Maximum Correlation Coefficient. */ Q[z][y].direction[i].red+=cooccurrence[z][x].direction[i].red* cooccurrence[y][x].direction[i].red/density_x[z].direction[i].red/ density_y[x].direction[i].red; Q[z][y].direction[i].green+=cooccurrence[z][x].direction[i].green* cooccurrence[y][x].direction[i].green/ density_x[z].direction[i].green/density_y[x].direction[i].red; Q[z][y].direction[i].blue+=cooccurrence[z][x].direction[i].blue* cooccurrence[y][x].direction[i].blue/density_x[z].direction[i].blue/ density_y[x].direction[i].blue; if (image->matte != MagickFalse) Q[z][y].direction[i].opacity+= cooccurrence[z][x].direction[i].opacity* cooccurrence[y][x].direction[i].opacity/ density_x[z].direction[i].opacity/ density_y[x].direction[i].opacity; if (image->colorspace == CMYKColorspace) Q[z][y].direction[i].index+=cooccurrence[z][x].direction[i].index* cooccurrence[y][x].direction[i].index/ density_x[z].direction[i].index/density_y[x].direction[i].index; } } channel_features[RedChannel].contrast[i]+=z*z*pixel.direction[i].red; channel_features[GreenChannel].contrast[i]+=z*z*pixel.direction[i].green; 
channel_features[BlueChannel].contrast[i]+=z*z*pixel.direction[i].blue; if (image->matte != MagickFalse) channel_features[OpacityChannel].contrast[i]+=z*z* pixel.direction[i].opacity; if (image->colorspace == CMYKColorspace) channel_features[BlackChannel].contrast[i]+=z*z* pixel.direction[i].index; } /* Maximum Correlation Coefficient. Future: return second largest eigenvalue of Q. */ channel_features[RedChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); channel_features[GreenChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); channel_features[BlueChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); if (image->matte != MagickFalse) channel_features[OpacityChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); } /* Relinquish resources. */ sum=(ChannelStatistics *) RelinquishMagickMemory(sum); for (i=0; i < (ssize_t) number_grays; i++) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); Q=(ChannelStatistics **) RelinquishMagickMemory(Q); density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); return(channel_features); }
Grid.h
#pragma once #include "macros.h" #include "GridTypes.h" #include "ScalarField.h" #include "Vectors.h" #include "Constants.h" namespace pfc { enum InterpolationType { Interpolation_CIC, Interpolation_TSC, Interpolation_SecondOrder, Interpolation_FourthOrder, Interpolation_PCS }; template<typename Data, GridTypes gridType_> class Grid : // next labels define some properties of grid public LabelFieldsSpatialStraggered<gridType_>, public LabelFieldsTimeStraggered<gridType_> { public: static const GridTypes gridType = gridType_; Grid(const Int3 & _numInternalCells, const FP3 & minCoords, const FP3 & _steps, const Int3 & globalGridDims); Grid(const Int3 & _numAllCells, const Int3 & globalGridDims); // for complex grids only Grid(const Int3 & _numAllCells, const Int3 & globalGridDims, Grid<FP, gridType_>* grid); // 'grid' and 'this' will have common memory // copy constructor, can make shallow copies Grid(const Grid& grid, bool ifShallowCopy = false); // copy values from *this to *grid template <class TGrid> void copyValues(TGrid* grid) const; forceinline const FP3 BxPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftBx; } forceinline const FP3 ByPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftBy; } forceinline const FP3 BzPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftBz; } forceinline const FP3 ExPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftEJx; } forceinline const FP3 EyPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftEJy; } forceinline const FP3 EzPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftEJz; } forceinline const FP3 JxPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftEJx; } forceinline const FP3 JyPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftEJy; } forceinline const FP3 JzPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftEJz; } void getFieldsXYZ(FP 
x, FP y, FP z, FP3 & e, FP3 & b) const { FP3 coords(x, y, z); getFields(coords, e, b); } void getFields(const FP3& coords, FP3 & e, FP3 & b) const { (this->*interpolationFields)(coords, e, b); } virtual FP3 getJ(const FP3& coords) const; virtual FP3 getE(const FP3& coords) const; virtual FP3 getB(const FP3& coords) const; void getFieldsCIC(const FP3& coords, FP3 & e, FP3 & b) const; void getFieldsTSC(const FP3& coords, FP3 & e, FP3 & b) const; void getFieldsSecondOrder(const FP3& coords, FP3 & e, FP3 & b) const; void getFieldsFourthOrder(const FP3& coords, FP3 & e, FP3 & b) const; void getFieldsPCS(const FP3& coords, FP3 & e, FP3 & b) const; FP getEx(const FP3& coords) const { return (this->*interpolationEx)(coords); } FP getEy(const FP3& coords) const { return (this->*interpolationEy)(coords); } FP getEz(const FP3& coords) const { return (this->*interpolationEz)(coords); } FP getBx(const FP3& coords) const { return (this->*interpolationBx)(coords); } FP getBy(const FP3& coords) const { return (this->*interpolationBy)(coords); } FP getBz(const FP3& coords) const { return (this->*interpolationBz)(coords); } FP getJx(const FP3& coords) const { return (this->*interpolationJx)(coords); } FP getJy(const FP3& coords) const { return (this->*interpolationJy)(coords); } FP getJz(const FP3& coords) const { return (this->*interpolationJz)(coords); } FP getExCIC(const FP3& coords) const { return getFieldCIC(coords, Ex, shiftEJx); } FP getEyCIC(const FP3& coords) const { return getFieldCIC(coords, Ey, shiftEJy); } FP getEzCIC(const FP3& coords) const { return getFieldCIC(coords, Ez, shiftEJz); } FP getBxCIC(const FP3& coords) const { return getFieldCIC(coords, Bx, shiftBx); } FP getByCIC(const FP3& coords) const { return getFieldCIC(coords, By, shiftBy); } FP getBzCIC(const FP3& coords) const { return getFieldCIC(coords, Bz, shiftBz); } FP getJxCIC(const FP3& coords) const { return getFieldCIC(coords, Jx, shiftEJx); } FP getJyCIC(const FP3& coords) const { return 
getFieldCIC(coords, Jy, shiftEJy); } FP getJzCIC(const FP3& coords) const { return getFieldCIC(coords, Jz, shiftEJz); } FP getExTSC(const FP3& coords) const { return getFieldTSC(coords, Ex, shiftEJx); } FP getEyTSC(const FP3& coords) const { return getFieldTSC(coords, Ey, shiftEJy); } FP getEzTSC(const FP3& coords) const { return getFieldTSC(coords, Ez, shiftEJz); } FP getBxTSC(const FP3& coords) const { return getFieldTSC(coords, Bx, shiftBx); } FP getByTSC(const FP3& coords) const { return getFieldTSC(coords, By, shiftBy); } FP getBzTSC(const FP3& coords) const { return getFieldTSC(coords, Bz, shiftBz); } FP getJxTSC(const FP3& coords) const { return getFieldTSC(coords, Jx, shiftEJx); } FP getJyTSC(const FP3& coords) const { return getFieldTSC(coords, Jy, shiftEJy); } FP getJzTSC(const FP3& coords) const { return getFieldTSC(coords, Jz, shiftEJz); } FP getExSecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Ex, shiftEJx); } FP getEySecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Ey, shiftEJy); } FP getEzSecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Ez, shiftEJz); } FP getBxSecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Bx, shiftBx); } FP getBySecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, By, shiftBy); } FP getBzSecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Bz, shiftBz); } FP getJxSecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Jx, shiftEJx); } FP getJySecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Jy, shiftEJy); } FP getJzSecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Jz, shiftEJz); } FP getExFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Ex, shiftEJx); } FP getEyFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Ey, shiftEJy); } FP getEzFourthOrder(const FP3& coords) const { 
return getFieldFourthOrder(coords, Ez, shiftEJz); } FP getBxFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Bx, shiftBx); } FP getByFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, By, shiftBy); } FP getBzFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Bz, shiftBz); } FP getJxFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Jx, shiftEJx); } FP getJyFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Jy, shiftEJy); } FP getJzFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Jz, shiftEJz); } FP getExPCS(const FP3& coords) const { return getFieldPCS(coords, Ex, shiftEJx); } FP getEyPCS(const FP3& coords) const { return getFieldPCS(coords, Ey, shiftEJy); } FP getEzPCS(const FP3& coords) const { return getFieldPCS(coords, Ez, shiftEJz); } FP getBxPCS(const FP3& coords) const { return getFieldPCS(coords, Bx, shiftBx); } FP getByPCS(const FP3& coords) const { return getFieldPCS(coords, By, shiftBy); } FP getBzPCS(const FP3& coords) const { return getFieldPCS(coords, Bz, shiftBz); } FP getJxPCS(const FP3& coords) const { return getFieldPCS(coords, Jx, shiftEJx); } FP getJyPCS(const FP3& coords) const { return getFieldPCS(coords, Jy, shiftEJy); } FP getJzPCS(const FP3& coords) const { return getFieldPCS(coords, Jz, shiftEJz); } /*void dumpE(FP3 * e, const Int3 * minCellIdx, const Int3 * maxCellIdx); void dumpB(FP3 * b, const Int3 * minCellIdx, const Int3 * maxCellIdx); void dumpCurrents(FP3 * currents, const Int3 * minCellIdx, const Int3 * maxCellIdx); void loadE(const FP3 * e, const Int3 * minCellIdx, const Int3 * maxCellIdx); void loadB(const FP3 * b, const Int3 * minCellIdx, const Int3 * maxCellIdx); void loadCurrents(const FP3 * currents, const Int3 * minCellIdx, const Int3 * maxCellIdx);*/ /* Make all current density values zero. 
*/ void zeroizeJ(); const Int3 getNumExternalLeftCells() const { Int3 result(2, 2, 2); for (int d = 0; d < 3; d++) if (globalGridDims[d] == 1) result[d] = 0; return result; } const Int3 getNumExternalRightCells() const { return getNumExternalLeftCells(); } void setInterpolationType(InterpolationType type); InterpolationType getInterpolationType() const; const Int3 globalGridDims; // important to initialize it first const FP3 steps; const Int3 numInternalCells; const Int3 numCells; const Int3 sizeStorage; // sometimes can be larger than numCells const FP3 origin; const int dimensionality; ScalarField<Data> Ex, Ey, Ez, Bx, By, Bz, Jx, Jy, Jz; private: // 3d shifts of the field in the cell const FP3 shiftEJx, shiftEJy, shiftEJz, shiftBx, shiftBy, shiftBz; /* Get grid index and normalized internal coords in [0, 0, 0]..(1, 1, 1) for given physical coords and shift. */ void getGridCoords(const FP3 & coords, const FP3 & shift, Int3 & idx, FP3 & internalCoords) const { idx.x = (int)((coords.x - origin.x - shift.x) / steps.x); idx.y = (int)((coords.y - origin.y - shift.y) / steps.y); idx.z = (int)((coords.z - origin.z - shift.z) / steps.z); internalCoords = (coords - baseCoords(idx.x, idx.y, idx.z) - shift) / steps; } void getClosestGridCoords(const FP3 & coords, const FP3 & shift, Int3 & idx, FP3 & internalCoords) const { idx.x = (int)((coords.x - origin.x - shift.x) / steps.x + 0.5); idx.y = (int)((coords.y - origin.y - shift.y) / steps.y + 0.5); idx.z = (int)((coords.z - origin.z - shift.z) / steps.z + 0.5); internalCoords = (coords - baseCoords(idx.x, idx.y, idx.z) - shift) / steps; } /* Get base coords of element (i, j, k) so that its real coords are base coords + corresponding shift. 
*/ forceinline const FP3 baseCoords(int i, int j, int k) const { return origin + FP3(i, j, k) * steps; } // if coords is inside of the area that grid defines forceinline bool isInside(const FP3 & coords, const FP3 & shift) const { FP3 minCoords = origin + shift * steps; FP3 maxCoords = minCoords + (numCells - Int3(1, 1, 1)) * steps; return coords >= minCoords && coords <= maxCoords; } FP getFieldCIC(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const; FP getFieldTSC(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const; FP getFieldSecondOrder(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const; FP getFieldFourthOrder(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const; FP getFieldPCS(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const; InterpolationType interpolationType; void (Grid::*interpolationFields)(const FP3&, FP3&, FP3&) const; FP (Grid::*interpolationEx)(const FP3&) const; FP(Grid::*interpolationEy)(const FP3&) const; FP(Grid::*interpolationEz)(const FP3&) const; FP(Grid::*interpolationBx)(const FP3&) const; FP(Grid::*interpolationBy)(const FP3&) const; FP(Grid::*interpolationBz)(const FP3&) const; FP(Grid::*interpolationJx)(const FP3&) const; FP(Grid::*interpolationJy)(const FP3&) const; FP(Grid::*interpolationJz)(const FP3&) const; }; typedef Grid<FP, GridTypes::YeeGridType> YeeGrid; typedef Grid<FP, GridTypes::StraightGridType> SimpleGrid; typedef Grid<FP, GridTypes::PSTDGridType> PSTDGrid; typedef Grid<FP, GridTypes::PSATDGridType> PSATDGrid; typedef Grid<FP, GridTypes::PSATDTimeStraggeredGridType> PSATDTimeStraggeredGrid; // create deep or shallow copy template<typename Data, GridTypes gridType_> inline Grid<Data, gridType_>::Grid(const Grid<Data, gridType_>& grid, bool ifShallowCopy) : globalGridDims(grid.globalGridDims), steps(grid.steps), numInternalCells(grid.numInternalCells), numCells(grid.numCells), 
sizeStorage(grid.sizeStorage), shiftEJx(grid.shiftEJx), shiftEJy(grid.shiftEJy), shiftEJz(grid.shiftEJz), shiftBx(grid.shiftBx), shiftBy(grid.shiftBy), shiftBz(grid.shiftBz), origin(grid.origin), dimensionality(grid.dimensionality), Ex(grid.Ex, ifShallowCopy), Ey(grid.Ey, ifShallowCopy), Ez(grid.Ez, ifShallowCopy), Bx(grid.Bx, ifShallowCopy), By(grid.By, ifShallowCopy), Bz(grid.Bz, ifShallowCopy), Jx(grid.Jx, ifShallowCopy), Jy(grid.Jy, ifShallowCopy), Jz(grid.Jz, ifShallowCopy) { setInterpolationType(grid.interpolationType); } template <> inline Grid<FP, GridTypes::YeeGridType>::Grid(const Int3 & _numCells, const FP3 & minCoords, const FP3 & _steps, const Int3 & _globalGridDims) : globalGridDims(_globalGridDims), steps(_steps), numInternalCells(_numCells), numCells(numInternalCells + getNumExternalLeftCells() + getNumExternalRightCells()), sizeStorage(numCells), Ex(sizeStorage), Ey(sizeStorage), Ez(sizeStorage), Bx(sizeStorage), By(sizeStorage), Bz(sizeStorage), Jx(sizeStorage), Jy(sizeStorage), Jz(sizeStorage), shiftEJx(FP3(0, 0.5, 0.5) * steps), shiftEJy(FP3(0.5, 0, 0.5) * steps), shiftEJz(FP3(0.5, 0.5, 0) * steps), shiftBx(FP3(0.5, 0, 0) * steps), shiftBy(FP3(0, 0.5, 0) * steps), shiftBz(FP3(0, 0, 0.5) * steps), origin(minCoords.x - steps.x * getNumExternalLeftCells().x, minCoords.y - steps.y * getNumExternalLeftCells().y, minCoords.z - steps.z * getNumExternalLeftCells().z), dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1)) { setInterpolationType(Interpolation_CIC); setInterpolationType(Interpolation_CIC); } template<> inline Grid<FP, GridTypes::StraightGridType>::Grid(const Int3 & _numInternalCells, const FP3 & minCoords, const FP3 & _steps, const Int3 & _globalGridDims) : globalGridDims(_globalGridDims), steps(_steps), numInternalCells(_numInternalCells), numCells(numInternalCells + getNumExternalLeftCells() + getNumExternalRightCells()), sizeStorage(numCells), Ex(sizeStorage), Ey(sizeStorage), Ez(sizeStorage), 
Bx(sizeStorage), By(sizeStorage), Bz(sizeStorage), Jx(sizeStorage), Jy(sizeStorage), Jz(sizeStorage), shiftEJx(FP3(0, 0, 0) * steps), shiftEJy(FP3(0, 0, 0) * steps), shiftEJz(FP3(0, 0, 0) * steps), shiftBx(FP3(0, 0, 0) * steps), shiftBy(FP3(0, 0, 0) * steps), shiftBz(FP3(0, 0, 0) * steps), origin(minCoords.x - steps.x * getNumExternalLeftCells().x, minCoords.y - steps.y * getNumExternalLeftCells().y, minCoords.z - steps.z * getNumExternalLeftCells().z), dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1)) { setInterpolationType(Interpolation_CIC); } // SPECTRAL GRIDS // PSTD template<> inline Grid<complexFP, GridTypes::PSTDGridType>::Grid(const Int3 & _numInternalCells, const Int3 & _globalGridDims) : globalGridDims(_globalGridDims), numInternalCells(_numInternalCells), numCells(numInternalCells), sizeStorage(numCells), Ex(sizeStorage), Ey(sizeStorage), Ez(sizeStorage), Bx(sizeStorage), By(sizeStorage), Bz(sizeStorage), Jx(sizeStorage), Jy(sizeStorage), Jz(sizeStorage), shiftEJx(FP3(0, 0, 0) * steps), shiftEJy(FP3(0, 0, 0) * steps), shiftEJz(FP3(0, 0, 0) * steps), shiftBx(FP3(0, 0, 0) * steps), shiftBy(FP3(0, 0, 0) * steps), shiftBz(FP3(0, 0, 0) * steps), dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1)) { setInterpolationType(Interpolation_CIC); } template<> inline Grid<complexFP, GridTypes::PSTDGridType>::Grid(const Int3 & _numInternalCells, const Int3 & _globalGridDims, Grid<FP, GridTypes::PSTDGridType>* grid) : globalGridDims(_globalGridDims), numInternalCells(_numInternalCells), numCells(numInternalCells), sizeStorage(numCells), Ex(reinterpret_cast<complexFP*>(grid->Ex.getData()), sizeStorage), Ey(reinterpret_cast<complexFP*>(grid->Ey.getData()), sizeStorage), Ez(reinterpret_cast<complexFP*>(grid->Ez.getData()), sizeStorage), Bx(reinterpret_cast<complexFP*>(grid->Bx.getData()), sizeStorage), By(reinterpret_cast<complexFP*>(grid->By.getData()), sizeStorage), 
Bz(reinterpret_cast<complexFP*>(grid->Bz.getData()), sizeStorage), Jx(reinterpret_cast<complexFP*>(grid->Jx.getData()), sizeStorage), Jy(reinterpret_cast<complexFP*>(grid->Jy.getData()), sizeStorage), Jz(reinterpret_cast<complexFP*>(grid->Jz.getData()), sizeStorage), shiftEJx(FP3(0, 0, 0) * steps), shiftEJy(FP3(0, 0, 0) * steps), shiftEJz(FP3(0, 0, 0) * steps), shiftBx(FP3(0, 0, 0) * steps), shiftBy(FP3(0, 0, 0) * steps), shiftBz(FP3(0, 0, 0) * steps), dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1)) { setInterpolationType(Interpolation_CIC); } template<> inline Grid<FP, GridTypes::PSTDGridType>::Grid(const Int3 & _numInternalCells, const FP3 & minCoords, const FP3 & _steps, const Int3 & _globalGridDims) : globalGridDims(_globalGridDims), steps(_steps), numInternalCells(_numInternalCells), numCells(numInternalCells), sizeStorage(Int3(numCells.x, numCells.y, 2 * (numCells.z / 2 + 1))), Ex(sizeStorage), Ey(sizeStorage), Ez(sizeStorage), Bx(sizeStorage), By(sizeStorage), Bz(sizeStorage), Jx(sizeStorage), Jy(sizeStorage), Jz(sizeStorage), shiftEJx(FP3(0, 0, 0) * steps), shiftEJy(FP3(0, 0, 0) * steps), shiftEJz(FP3(0, 0, 0) * steps), shiftBx(FP3(0, 0, 0) * steps), shiftBy(FP3(0, 0, 0) * steps), shiftBz(FP3(0, 0, 0) * steps), origin(minCoords), dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1)) { setInterpolationType(Interpolation_CIC); } // PSATD template<> inline Grid<complexFP, GridTypes::PSATDGridType>::Grid(const Int3 & _numInternalCells, const Int3 & _globalGridDims) : globalGridDims(_globalGridDims), numInternalCells(_numInternalCells), numCells(numInternalCells), sizeStorage(numCells), Ex(sizeStorage), Ey(sizeStorage), Ez(sizeStorage), Bx(sizeStorage), By(sizeStorage), Bz(sizeStorage), Jx(sizeStorage), Jy(sizeStorage), Jz(sizeStorage), shiftEJx(FP3(0, 0, 0) * steps), shiftEJy(FP3(0, 0, 0) * steps), shiftEJz(FP3(0, 0, 0) * steps), shiftBx(FP3(0, 0, 0) * steps), shiftBy(FP3(0, 
0, 0) * steps), shiftBz(FP3(0, 0, 0) * steps), dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1)) { setInterpolationType(Interpolation_CIC); } template<> inline Grid<complexFP, GridTypes::PSATDGridType>::Grid(const Int3 & _numInternalCells, const Int3 & _globalGridDims, Grid<FP, GridTypes::PSATDGridType>* grid) : globalGridDims(_globalGridDims), numInternalCells(_numInternalCells), numCells(numInternalCells), sizeStorage(numCells), Ex(reinterpret_cast<complexFP*>(grid->Ex.getData()), sizeStorage), Ey(reinterpret_cast<complexFP*>(grid->Ey.getData()), sizeStorage), Ez(reinterpret_cast<complexFP*>(grid->Ez.getData()), sizeStorage), Bx(reinterpret_cast<complexFP*>(grid->Bx.getData()), sizeStorage), By(reinterpret_cast<complexFP*>(grid->By.getData()), sizeStorage), Bz(reinterpret_cast<complexFP*>(grid->Bz.getData()), sizeStorage), Jx(reinterpret_cast<complexFP*>(grid->Jx.getData()), sizeStorage), Jy(reinterpret_cast<complexFP*>(grid->Jy.getData()), sizeStorage), Jz(reinterpret_cast<complexFP*>(grid->Jz.getData()), sizeStorage), shiftEJx(FP3(0, 0, 0) * steps), shiftEJy(FP3(0, 0, 0) * steps), shiftEJz(FP3(0, 0, 0) * steps), shiftBx(FP3(0, 0, 0) * steps), shiftBy(FP3(0, 0, 0) * steps), shiftBz(FP3(0, 0, 0) * steps), dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1)) { setInterpolationType(Interpolation_CIC); } template<> inline Grid<FP, GridTypes::PSATDGridType>::Grid(const Int3 & _numInternalCells, const FP3 & minCoords, const FP3 & _steps, const Int3 & _globalGridDims) : globalGridDims(_globalGridDims), steps(_steps), numInternalCells(_numInternalCells), numCells(numInternalCells), sizeStorage(Int3(numCells.x, numCells.y, 2 * (numCells.z / 2 + 1))), Ex(sizeStorage), Ey(sizeStorage), Ez(sizeStorage), Bx(sizeStorage), By(sizeStorage), Bz(sizeStorage), Jx(sizeStorage), Jy(sizeStorage), Jz(sizeStorage), shiftEJx(FP3(0, 0, 0) * steps), shiftEJy(FP3(0, 0, 0) * steps), shiftEJz(FP3(0, 0, 0) 
* steps), shiftBx(FP3(0, 0, 0) * steps), shiftBy(FP3(0, 0, 0) * steps), shiftBz(FP3(0, 0, 0) * steps), origin(minCoords), dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1)) { setInterpolationType(Interpolation_CIC); } // PSATDTimeStraggered template<> inline Grid<complexFP, GridTypes::PSATDTimeStraggeredGridType>::Grid(const Int3 & _numInternalCells, const Int3 & _globalGridDims) : globalGridDims(_globalGridDims), numInternalCells(_numInternalCells), numCells(numInternalCells), sizeStorage(numCells), Ex(sizeStorage), Ey(sizeStorage), Ez(sizeStorage), Bx(sizeStorage), By(sizeStorage), Bz(sizeStorage), Jx(sizeStorage), Jy(sizeStorage), Jz(sizeStorage), shiftEJx(FP3(0, 0, 0) * steps), shiftEJy(FP3(0, 0, 0) * steps), shiftEJz(FP3(0, 0, 0) * steps), shiftBx(FP3(0, 0, 0) * steps), shiftBy(FP3(0, 0, 0) * steps), shiftBz(FP3(0, 0, 0) * steps), dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1)) { setInterpolationType(Interpolation_CIC); } // PSATDTimeStraggered template<> inline Grid<complexFP, GridTypes::PSATDTimeStraggeredGridType>::Grid(const Int3 & _numInternalCells, const Int3 & _globalGridDims, Grid<FP, GridTypes::PSATDTimeStraggeredGridType>* grid) : globalGridDims(_globalGridDims), numInternalCells(_numInternalCells), numCells(numInternalCells), sizeStorage(numCells), Ex(reinterpret_cast<complexFP*>(grid->Ex.getData()), sizeStorage), Ey(reinterpret_cast<complexFP*>(grid->Ey.getData()), sizeStorage), Ez(reinterpret_cast<complexFP*>(grid->Ez.getData()), sizeStorage), Bx(reinterpret_cast<complexFP*>(grid->Bx.getData()), sizeStorage), By(reinterpret_cast<complexFP*>(grid->By.getData()), sizeStorage), Bz(reinterpret_cast<complexFP*>(grid->Bz.getData()), sizeStorage), Jx(reinterpret_cast<complexFP*>(grid->Jx.getData()), sizeStorage), Jy(reinterpret_cast<complexFP*>(grid->Jy.getData()), sizeStorage), Jz(reinterpret_cast<complexFP*>(grid->Jz.getData()), sizeStorage), shiftEJx(FP3(0, 0, 
0) * steps), shiftEJy(FP3(0, 0, 0) * steps), shiftEJz(FP3(0, 0, 0) * steps), shiftBx(FP3(0, 0, 0) * steps), shiftBy(FP3(0, 0, 0) * steps), shiftBz(FP3(0, 0, 0) * steps), dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1)) { setInterpolationType(Interpolation_CIC); } template<> inline Grid<FP, GridTypes::PSATDTimeStraggeredGridType>::Grid(const Int3 & _numInternalCells, const FP3 & minCoords, const FP3 & _steps, const Int3 & _globalGridDims) : globalGridDims(_globalGridDims), steps(_steps), numInternalCells(_numInternalCells), numCells(numInternalCells), sizeStorage(Int3(numCells.x, numCells.y, 2 * (numCells.z / 2 + 1))), Ex(sizeStorage), Ey(sizeStorage), Ez(sizeStorage), Bx(sizeStorage), By(sizeStorage), Bz(sizeStorage), Jx(sizeStorage), Jy(sizeStorage), Jz(sizeStorage), shiftEJx(FP3(0, 0, 0) * steps), shiftEJy(FP3(0, 0, 0) * steps), shiftEJz(FP3(0, 0, 0) * steps), shiftBx(FP3(0, 0, 0) * steps), shiftBy(FP3(0, 0, 0) * steps), shiftBz(FP3(0, 0, 0) * steps), origin(minCoords), dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1)) { setInterpolationType(Interpolation_CIC); } // end SPECTRAL GRIDS template <typename Data, GridTypes gT> template <class TGrid> inline void Grid<Data, gT>::copyValues(TGrid* grid) const { const int nx = grid->numCells.x, ny = grid->numCells.y, nz = grid->numCells.z; OMP_FOR_COLLAPSE() for (int i = 0; i < nx; i++) for (int j = 0; j < ny; j++) OMP_SIMD() for (int k = 0; k < nz; k++) { grid->Ex(i, j, k) = this->getEx(grid->ExPosition(i, j, k)); grid->Ey(i, j, k) = this->getEy(grid->EyPosition(i, j, k)); grid->Ez(i, j, k) = this->getEz(grid->EzPosition(i, j, k)); grid->Bx(i, j, k) = this->getBx(grid->BxPosition(i, j, k)); grid->By(i, j, k) = this->getBy(grid->ByPosition(i, j, k)); grid->Bz(i, j, k) = this->getBz(grid->BzPosition(i, j, k)); grid->Jx(i, j, k) = this->getJx(grid->JxPosition(i, j, k)); grid->Jy(i, j, k) = this->getJy(grid->JyPosition(i, j, k)); 
grid->Jz(i, j, k) = this->getJz(grid->JzPosition(i, j, k)); } } template< typename Data, GridTypes gT> inline FP Grid<Data, gT>::getFieldCIC(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const { Int3 idx; FP3 internalCoords; getGridCoords(coords, shift, idx, internalCoords); return field.interpolateCIC(idx, internalCoords); } template< typename Data, GridTypes gT> inline FP Grid<Data, gT>::getFieldTSC(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const { Int3 idx; FP3 internalCoords; getClosestGridCoords(coords, shift, idx, internalCoords); return field.interpolateTSC(idx, internalCoords); } template< typename Data, GridTypes gT> inline FP Grid<Data, gT>::getFieldSecondOrder(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const { Int3 idx; FP3 internalCoords; getClosestGridCoords(coords, shift, idx, internalCoords); return field.interpolateSecondOrder(idx, internalCoords); } template< typename Data, GridTypes gT> inline FP Grid<Data, gT>::getFieldFourthOrder(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const { Int3 idx; FP3 internalCoords; getClosestGridCoords(coords, shift, idx, internalCoords); return field.interpolateFourthOrder(idx, internalCoords); } template< typename Data, GridTypes gT> inline FP Grid<Data, gT>::getFieldPCS(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const { Int3 idx; FP3 internalCoords; getGridCoords(coords, shift, idx, internalCoords); return field.interpolatePCS(idx, internalCoords); } template< typename Data, GridTypes gT> inline void Grid<Data, gT>::getFieldsCIC(const FP3& coords, FP3 & e, FP3 & b) const { /* For each component of E and B get grid index and internal coords, use it as base index and coefficients of interpolation. 
*/ Int3 idx; FP3 internalCoords; getGridCoords(coords, shiftEJx, idx, internalCoords); e.x = Ex.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftEJy, idx, internalCoords); e.y = Ey.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftEJz, idx, internalCoords); e.z = Ez.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftBx, idx, internalCoords); b.x = Bx.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftBy, idx, internalCoords); b.y = By.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftBz, idx, internalCoords); b.z = Bz.interpolateCIC(idx, internalCoords); } template< typename Data, GridTypes gT> inline void Grid<Data, gT>::getFieldsTSC(const FP3& coords, FP3 & e, FP3 & b) const { Int3 idx; FP3 internalCoords; getClosestGridCoords(coords, shiftEJx, idx, internalCoords); e.x = Ex.interpolateTSC(idx, internalCoords); getClosestGridCoords(coords, shiftEJy, idx, internalCoords); e.y = Ey.interpolateTSC(idx, internalCoords); getClosestGridCoords(coords, shiftEJz, idx, internalCoords); e.z = Ez.interpolateTSC(idx, internalCoords); getClosestGridCoords(coords, shiftBx, idx, internalCoords); b.x = Bx.interpolateTSC(idx, internalCoords); getClosestGridCoords(coords, shiftBy, idx, internalCoords); b.y = By.interpolateTSC(idx, internalCoords); getClosestGridCoords(coords, shiftBz, idx, internalCoords); b.z = Bz.interpolateTSC(idx, internalCoords); } template< typename Data, GridTypes gT> inline void Grid<Data, gT>::getFieldsSecondOrder(const FP3& coords, FP3 & e, FP3 & b) const { Int3 idx; FP3 internalCoords; getClosestGridCoords(coords, shiftEJx, idx, internalCoords); e.x = Ex.interpolateSecondOrder(idx, internalCoords); getClosestGridCoords(coords, shiftEJy, idx, internalCoords); e.y = Ey.interpolateSecondOrder(idx, internalCoords); getClosestGridCoords(coords, shiftEJz, idx, internalCoords); e.z = Ez.interpolateSecondOrder(idx, internalCoords); getClosestGridCoords(coords, shiftBx, idx, internalCoords); b.x 
= Bx.interpolateSecondOrder(idx, internalCoords); getClosestGridCoords(coords, shiftBy, idx, internalCoords); b.y = By.interpolateSecondOrder(idx, internalCoords); getClosestGridCoords(coords, shiftBz, idx, internalCoords); b.z = Bz.interpolateSecondOrder(idx, internalCoords); } template< typename Data, GridTypes gT> inline void Grid<Data, gT>::getFieldsFourthOrder(const FP3& coords, FP3 & e, FP3 & b) const { Int3 idx; FP3 internalCoords; getClosestGridCoords(coords, shiftEJx, idx, internalCoords); e.x = Ex.interpolateFourthOrder(idx, internalCoords); getClosestGridCoords(coords, shiftEJy, idx, internalCoords); e.y = Ey.interpolateFourthOrder(idx, internalCoords); getClosestGridCoords(coords, shiftEJz, idx, internalCoords); e.z = Ez.interpolateFourthOrder(idx, internalCoords); getClosestGridCoords(coords, shiftBx, idx, internalCoords); b.x = Bx.interpolateFourthOrder(idx, internalCoords); getClosestGridCoords(coords, shiftBy, idx, internalCoords); b.y = By.interpolateFourthOrder(idx, internalCoords); getClosestGridCoords(coords, shiftBz, idx, internalCoords); b.z = Bz.interpolateFourthOrder(idx, internalCoords); } template< typename Data, GridTypes gT> inline void Grid<Data, gT>::getFieldsPCS(const FP3& coords, FP3 & e, FP3 & b) const { Int3 idx; FP3 internalCoords; getGridCoords(coords, shiftEJx, idx, internalCoords); e.x = Ex.interpolatePCS(idx, internalCoords); getGridCoords(coords, shiftEJy, idx, internalCoords); e.y = Ey.interpolatePCS(idx, internalCoords); getGridCoords(coords, shiftEJz, idx, internalCoords); e.z = Ez.interpolatePCS(idx, internalCoords); getGridCoords(coords, shiftBx, idx, internalCoords); b.x = Bx.interpolatePCS(idx, internalCoords); getGridCoords(coords, shiftBy, idx, internalCoords); b.y = By.interpolatePCS(idx, internalCoords); getGridCoords(coords, shiftBz, idx, internalCoords); b.z = Bz.interpolatePCS(idx, internalCoords); } template< typename Data, GridTypes gT> inline FP3 Grid<Data, gT>::getJ(const FP3& coords) const { // zero fields 
are outside of area that grid defines //if (!isInside(coords, shiftEJx) || !isInside(coords, shiftEJy) || !isInside(coords, shiftEJz)) // return FP3(0, 0, 0); /* For each component of J get grid index and internal coords, use it as base index and coefficients of interpolation. */ Int3 idx; FP3 internalCoords; FP3 j; getGridCoords(coords, shiftEJx, idx, internalCoords); j.x = Jx.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftEJy, idx, internalCoords); j.y = Jy.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftEJz, idx, internalCoords); j.z = Jz.interpolateCIC(idx, internalCoords); return j; } template< typename Data, GridTypes gT> inline FP3 Grid<Data, gT>::getE(const FP3& coords) const { // zero fields are outside of area that grid defines //if (!isInside(coords, shiftEJx) || !isInside(coords, shiftEJy) || !isInside(coords, shiftEJz)) // return FP3(0, 0, 0); /* For each component of J get grid index and internal coords, use it as base index and coefficients of interpolation. */ Int3 idx; FP3 internalCoords; FP3 e; getGridCoords(coords, shiftEJx, idx, internalCoords); e.x = Ex.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftEJy, idx, internalCoords); e.y = Ey.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftEJz, idx, internalCoords); e.z = Ez.interpolateCIC(idx, internalCoords); return e; } template< typename Data, GridTypes gT> inline FP3 Grid<Data, gT>::getB(const FP3& coords) const { // zero fields are outside of area that grid defines //if (!isInside(coords, shiftBx) || !isInside(coords, shiftBy) || !isInside(coords, shiftBz)) // return FP3(0, 0, 0); /* For each component of J get grid index and internal coords, use it as base index and coefficients of interpolation. 
*/ Int3 idx; FP3 internalCoords; FP3 b; getGridCoords(coords, shiftBx, idx, internalCoords); b.x = Bx.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftBy, idx, internalCoords); b.y = By.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftBz, idx, internalCoords); b.z = Bz.interpolateCIC(idx, internalCoords); return b; } template< typename Data, GridTypes gT> inline void Grid<Data, gT>::zeroizeJ() { Jx.zeroize(); Jy.zeroize(); Jz.zeroize(); } template< typename Data, GridTypes gT> inline void Grid<Data, gT>::setInterpolationType(InterpolationType type) { interpolationType = type; switch (interpolationType) { case Interpolation_CIC: interpolationFields = &Grid<Data, gT>::getFieldsCIC; interpolationEx = &Grid<Data, gT>::getExCIC; interpolationEy = &Grid<Data, gT>::getEyCIC; interpolationEz = &Grid<Data, gT>::getEzCIC; interpolationBx = &Grid<Data, gT>::getBxCIC; interpolationBy = &Grid<Data, gT>::getByCIC; interpolationBz = &Grid<Data, gT>::getBzCIC; interpolationJx = &Grid<Data, gT>::getJxCIC; interpolationJy = &Grid<Data, gT>::getJyCIC; interpolationJz = &Grid<Data, gT>::getJzCIC; break; case Interpolation_TSC: interpolationFields = &Grid<Data, gT>::getFieldsTSC; interpolationEx = &Grid<Data, gT>::getExTSC; interpolationEy = &Grid<Data, gT>::getEyTSC; interpolationEz = &Grid<Data, gT>::getEzTSC; interpolationBx = &Grid<Data, gT>::getBxTSC; interpolationBy = &Grid<Data, gT>::getByTSC; interpolationBz = &Grid<Data, gT>::getBzTSC; interpolationJx = &Grid<Data, gT>::getJxTSC; interpolationJy = &Grid<Data, gT>::getJyTSC; interpolationJz = &Grid<Data, gT>::getJzTSC; break; case Interpolation_PCS: interpolationFields = &Grid<Data, gT>::getFieldsPCS; interpolationEx = &Grid<Data, gT>::getExPCS; interpolationEy = &Grid<Data, gT>::getEyPCS; interpolationEz = &Grid<Data, gT>::getEzPCS; interpolationBx = &Grid<Data, gT>::getBxPCS; interpolationBy = &Grid<Data, gT>::getByPCS; interpolationBz = &Grid<Data, gT>::getBzPCS; interpolationJx = &Grid<Data, 
gT>::getJxPCS; interpolationJy = &Grid<Data, gT>::getJyPCS; interpolationJz = &Grid<Data, gT>::getJzPCS; break; case Interpolation_SecondOrder: interpolationFields = &Grid<Data, gT>::getFieldsSecondOrder; interpolationEx = &Grid<Data, gT>::getExSecondOrder; interpolationEy = &Grid<Data, gT>::getEySecondOrder; interpolationEz = &Grid<Data, gT>::getEzSecondOrder; interpolationBx = &Grid<Data, gT>::getBxSecondOrder; interpolationBy = &Grid<Data, gT>::getBySecondOrder; interpolationBz = &Grid<Data, gT>::getBzSecondOrder; interpolationJx = &Grid<Data, gT>::getJxSecondOrder; interpolationJy = &Grid<Data, gT>::getJySecondOrder; interpolationJz = &Grid<Data, gT>::getJzSecondOrder; break; case Interpolation_FourthOrder: interpolationFields = &Grid<Data, gT>::getFieldsFourthOrder; interpolationEx = &Grid<Data, gT>::getExFourthOrder; interpolationEy = &Grid<Data, gT>::getEyFourthOrder; interpolationEz = &Grid<Data, gT>::getEzFourthOrder; interpolationBx = &Grid<Data, gT>::getBxFourthOrder; interpolationBy = &Grid<Data, gT>::getByFourthOrder; interpolationBz = &Grid<Data, gT>::getBzFourthOrder; interpolationJx = &Grid<Data, gT>::getJxFourthOrder; interpolationJy = &Grid<Data, gT>::getJyFourthOrder; interpolationJz = &Grid<Data, gT>::getJzFourthOrder; break; } } template<typename Data, GridTypes gT> inline InterpolationType Grid<Data, gT>::getInterpolationType() const { return interpolationType; } /*template<> inline void Grid<FP, YeeGridType>::dumpB(FP3 * b, const Int3 * minCellIdx, const Int3 * maxCellIdx) { Int3 numCells = *maxCellIdx - *minCellIdx; #pragma omp parallel for collapse(3) for (int i = 0; i < numCells.x; ++i) for (int j = 0; j < numCells.y; ++j) for (int k = 0; k < numCells.z; ++k) { int idx = numCells.y * numCells.z * i + numCells.z * j + k; Int3 nodeIdx = *minCellIdx + Int3(i, j, k); b[idx].x = Bx(nodeIdx); b[idx].y = By(nodeIdx); b[idx].z = Bz(nodeIdx); } } template<> inline void Grid<FP, YeeGridType>::dumpE(FP3 * e, const Int3 * minCellIdx, const Int3 * 
maxCellIdx) { Int3 numCells = *maxCellIdx - *minCellIdx; #pragma omp parallel for collapse(3) for (int i = 0; i < numCells.x; ++i) for (int j = 0; j < numCells.y; ++j) for (int k = 0; k < numCells.z; ++k) { int idx = numCells.y * numCells.z * i + numCells.z * j + k; Int3 nodeIdx = *minCellIdx + Int3(i, j, k); e[idx].x = Ex(nodeIdx); e[idx].y = Ey(nodeIdx); e[idx].z = Ez(nodeIdx); } } template<> inline void Grid<FP, YeeGridType>::dumpCurrents(FP3 * currents, const Int3 * minCellIdx, const Int3 * maxCellIdx) { Int3 numCells = *maxCellIdx - *minCellIdx; #pragma omp parallel for collapse(3) for (int i = 0; i < numCells.x; ++i) for (int j = 0; j < numCells.y; ++j) for (int k = 0; k < numCells.z; ++k) { int idx = numCells.y * numCells.z * i + numCells.z * j + k; Int3 nodeIdx = *minCellIdx + Int3(i, j, k); currents[idx].x = Jx(nodeIdx); currents[idx].y = Jy(nodeIdx); currents[idx].z = Jz(nodeIdx); idx++; } } template<> inline void Grid<FP, YeeGridType>::loadE(const FP3 * e, const Int3 * minCellIdx, const Int3 * maxCellIdx) { Int3 numCells = *maxCellIdx - *minCellIdx; #pragma omp parallel for collapse(3) for (int i = 0; i < numCells.x; i++) for (int j = 0; j < numCells.y; j++) for (int k = 0; k < numCells.z; k++) { int idx = numCells.y * numCells.z * i + numCells.z * j + k; Int3 nodeIdx = *minCellIdx + Int3(i, j, k); Ex(nodeIdx) = e[idx].x; Ey(nodeIdx) = e[idx].y; Ez(nodeIdx) = e[idx].z; } } template<> inline void Grid<FP, YeeGridType>::loadB(const FP3 * b, const Int3 * minCellIdx, const Int3 * maxCellIdx) { Int3 numCells = *maxCellIdx - *minCellIdx; #pragma omp parallel for collapse(3) for (int i = 0; i < numCells.x; ++i) for (int j = 0; j < numCells.y; ++j) for (int k = 0; k < numCells.z; ++k) { int idx = numCells.y * numCells.z * i + numCells.z * j + k; Int3 nodeIdx = *minCellIdx + Int3(i, j, k); Bx(nodeIdx) = b[idx].x; By(nodeIdx) = b[idx].y; Bz(nodeIdx) = b[idx].z; } } template<> inline void Grid<FP, YeeGridType>::loadCurrents(const FP3 * currents, const Int3 * 
minCellIdx, const Int3 * maxCellIdx) { Int3 numCells = *maxCellIdx - *minCellIdx; #pragma omp parallel for collapse(3) for (int i = 0; i < numCells.x; i++) for (int j = 0; j < numCells.y; j++) for (int k = 0; k < numCells.z; k++) { int idx = numCells.y * numCells.z * i + numCells.z * j + k; Int3 nodeIdx = *minCellIdx + Int3(i, j, k); Jx(nodeIdx) = currents[idx].x; Jy(nodeIdx) = currents[idx].y; Jz(nodeIdx) = currents[idx].z; } }*/ }
nested_serialized.c
// RUN: %libomp-compile-and-run | FileCheck %s
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck --check-prefix=THREADS %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#include "callback.h"
#include <omp.h>

// OMPT event test: with nesting disabled via omp_set_nested(0), the inner
// parallel region must be serialized (executed by the encountering thread
// alone), yet the tool callbacks must still report a full begin/end event
// pair and a correct task ancestry for the nested (serialized) region.
int main()
{
  omp_set_nested(0);

#pragma omp parallel num_threads(4)
  {
    print_ids(0);
    print_ids(1);
    // Nested region: serialized because nesting is off; still requests 4.
#pragma omp parallel num_threads(4)
    {
      print_ids(0);
      print_ids(1);
      print_ids(2);
    }
    print_fuzzy_address(1);
  }
  print_fuzzy_address(2);

  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'

  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]

  // make sure initial data pointers are null
  // CHECK-NOT: 0: parallel_data initially not null
  // CHECK-NOT: 0: task_data initially not null
  // CHECK-NOT: 0: thread_data initially not null

  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=0x{{[0-f]+}}, invoker=[[PARALLEL_INVOKER:[0-9]+]]

  // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]

  // Note that we cannot ensure that the worker threads have already called barrier_end and implicit_task_end before parallel_end!

  // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]

  // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]

  // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]

  // Per-thread (sorted) view: master first, then each of the three workers.
  // THREADS: 0: NULL_POINTER=[[NULL:.*$]]
  // THREADS: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER:[0-9]+]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], task_id=[[PARENT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=[[NESTED_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
  // THREADS: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[NESTED_RETURN_ADDRESS]]
  // THREADS-NOT: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]

  // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=[[NESTED_RETURN_ADDRESS]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
  // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]

  // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=[[NESTED_RETURN_ADDRESS]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
  // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]

  // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=[[NESTED_RETURN_ADDRESS]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
  // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]

  return 0;
}
ompt-signal.h
// These functions are used to provide a signal-wait mechanism to enforce
// expected scheduling for the test cases. Conditional variable (s) needs to be
// shared! Initialize to 0

#include <unistd.h>

#define OMPT_SIGNAL(s) ompt_signal(&s)
// Atomically increment the shared counter *s by one.
//
// NOTE: these helpers are `static inline` rather than plain `inline`.
// Under C99/C11 inline semantics a plain `inline` definition in a header
// provides no external definition; if the compiler chooses not to inline
// (e.g. at -O0) the call is left as an undefined reference at link time.
// `static` gives each including translation unit its own definition.
static inline void ompt_signal(int *s)
{
#pragma omp atomic
  (*s)++;
}

#define OMPT_WAIT(s, v) ompt_wait(&s, v) // wait for s >= v
// Busy-wait (with a short sleep between polls) until *s >= v.
// The read is done with `#pragma omp atomic read` so concurrent
// ompt_signal() increments are observed safely.
static inline void ompt_wait(int *s, int v)
{
  int wait = 0;
  do {
    usleep(10);
#pragma omp atomic read
    wait = (*s);
  } while (wait < v);
}
GB_unop__lgamma_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__lgamma_fp32_fp32 // op(A') function: GB_unop_tran__lgamma_fp32_fp32 // C type: float // A type: float // cast: float cij = aij // unaryop: cij = lgammaf (aij) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = lgammaf (x) ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = lgammaf (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LGAMMA || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__lgamma_fp32_fp32 ( float *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { 
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = lgammaf (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; float z = aij ; Cx [p] = lgammaf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__lgamma_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
atomic-1.c
/* { dg-do run } */
/* { dg-additional-options "-march=pentium" { target { { i?86-*-* x86_64-*-* } && ia32 } } } */

/* Runtime test for `#pragma omp atomic` on floating-point update
   operations (+=, *=, /=), both on a plain global and on a struct
   member surrounded by other fields (to catch over-wide atomics
   clobbering neighbors).  */

#ifdef __i386__
#include "cpuid.h"
#endif

extern void abort (void);
double d;
/* x.e is deliberately sandwiched between two ints; main() checks that
   the atomic updates to x.e leave x.i and x.j untouched.  */
struct { int i; double e; int j; } x;

/* Atomic updates on a global double: (1 + 7.5) * 2.5 / 0.25 == 85.  */
void
f1 (void)
{
  #pragma omp atomic
  d += 7.5;
  #pragma omp atomic
  d *= 2.5;
  #pragma omp atomic
  d /= 0.25;
}

/* Same sequence of atomic updates, on a struct member.  */
void
f2 (void)
{
  #pragma omp atomic
  x.e += 7.5;
  #pragma omp atomic
  x.e *= 2.5;
  #pragma omp atomic
  x.e /= 0.25;
}

int
main (void)
{
#ifdef __i386__
  /* Atomic 8-byte updates on ia32 need the cmpxchg8b instruction;
     skip (pass trivially) on CPUs that lack it.  */
  unsigned int eax, ebx, ecx, edx;

  if (!__get_cpuid (1, &eax, &ebx, &ecx, &edx))
    return 0;

  if (!(edx & bit_CMPXCHG8B))
    return 0;
#endif
  d = 1.0;
  f1 ();
  if (d != 85.0)
    abort ();
  x.e = 1.0;
  f2 ();
  /* The neighboring ints must still be zero-initialized.  */
  if (x.i != 0 || x.e != 85.0 || x.j != 0)
    abort ();
  return 0;
}
pgpsda_fmt_plug.c
/* * Format for brute-forcing PGP SDAs (self-decrypting archives). * * This software is Copyright (c) 2017, Dhiru Kholia <dhiru.kholia at gmail.com>, * and it is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without modification, * are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_pgpsda; #elif FMT_REGISTERS_H john_register_one(&fmt_pgpsda); #else #include <string.h> #include <openssl/cast.h> #include "arch.h" #include "misc.h" #include "memory.h" #include "common.h" #include "formats.h" #include "johnswap.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 1 // this is a slow format #endif #endif #include "sha.h" #include "loader.h" #include "pgpsda_common.h" #include "memdbg.h" #define FORMAT_LABEL "pgpsda" #define FORMAT_NAME "" #define ALGORITHM_NAME "PGP SDA SHA1 " ARCH_BITS_STR #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN sizeof(int) #define BINARY_SIZE 8 #define BINARY_ALIGN sizeof(uint32_t) #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define FORMAT_TAG "$pgpsda$" #define FORMAT_TAG_LENGTH (sizeof(FORMAT_TAG) - 1) static struct custom_salt *cur_salt; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE * 2 / sizeof(uint32_t)]; static void init(struct fmt_main *self) { #ifdef _OPENMP static int omp_t = 1; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt); } static void done(void) { MEM_FREE(saved_key); MEM_FREE(crypt_out) } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static void *get_binary(char *ciphertext) { static union 
{ unsigned char c[BINARY_SIZE]; uint32_t dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '*') + 1; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } #undef SHA1_DIGEST_LENGTH #define SHA1_DIGEST_LENGTH 20 static void pgpsda_kdf(char *password, unsigned char *salt, unsigned char *key) { int plen; int iterations = cur_salt->iterations; SHA_CTX ctx; // SHA1 usage is hardcoded uint32_t j = 0; // "j" has type uint8_t in the original code plen = strlen(password); SHA1_Init(&ctx); SHA1_Update(&ctx, salt, 8); for (j = 0; j < iterations; j++) { SHA1_Update(&ctx, password, plen); #if ARCH_LITTLE_ENDIAN SHA1_Update(&ctx, (uint8_t*)&j, 1); #else SHA1_Update(&ctx, ((uint8_t*)&j) + 3, 1); #endif } SHA1_Final(key, &ctx); } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { int i; for (i = 0; i < MAX_KEYS_PER_CRYPT; i++) { unsigned char key[SHA1_DIGEST_LENGTH]; CAST_KEY ck; pgpsda_kdf(saved_key[i+index], cur_salt->salt, key); CAST_set_key(&ck, 16, key); memset((unsigned char*)crypt_out[index+i], 0, BINARY_SIZE); CAST_ecb_encrypt(key, (unsigned char*)crypt_out[index+i], &ck, CAST_ENCRYPT); } } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (((uint32_t*)binary)[0] == crypt_out[index][0]) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void set_key(char *key, int index) { strnzcpy(saved_key[index], key, sizeof(saved_key[index])); } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_pgpsda = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, 
PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, { FORMAT_TAG }, pgpsda_tests }, { init, done, fmt_default_reset, fmt_default_prepare, pgpsda_common_valid, fmt_default_split, get_binary, pgpsda_common_get_salt, { pgpsda_iteration_count, }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
prepress.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%                  PPPP   RRRR   EEEEE  PPPP   RRRR   EEEEE  SSSSS  SSSSS     %
%                  P   P  R   R  E      P   P  R   R  E      SS     SS        %
%                  PPPP   RRRR   EEE    PPPP   RRRR   EEE     SSS    SSS      %
%                  P      R R    E      P      R R    E         SS     SS     %
%                  P      R  R   EEEEE  P      R  R   EEEEE  SSSSS  SSSSS     %
%                                                                             %
%                                                                             %
%                     MagickCore Prepress Methods                             %
%                                                                             %
%                              Software Design                                %
%                                   Cristy                                    %
%                                October 2001                                 %
%                                                                             %
%                                                                             %
%  Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    http://www.imagemagick.org/script/license.php                            %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/

/*
  Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache-view.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/pixel-accessor.h"
#include "magick/prepress.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/thread-private.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e T o t a l I n k D e n s i t y                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageTotalInkDensity() returns the total ink density for a CMYK image.
%  Total Ink Density (TID) is determined by adding the CMYK values in the
%  darkest shadow area in an image.
%
%  The format of the GetImageTotalInkDensity method is:
%
%      double GetImageTotalInkDensity(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport double GetImageTotalInkDensity(Image *image)
{
  CacheView
    *image_view;

  double
    total_ink_density;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  /* TID is only defined for color-separated (CMYK) images. */
  if (image->colorspace != CMYKColorspace)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ImageError,"ColorSeparatedImageRequired","`%s'",image->filename);
      return(0.0);
    }
  status=MagickTrue;
  total_ink_density=0.0;
  exception=(&image->exception);
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      density;

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        /* A failed row read aborts the result (see status check below),
           but the loop must keep iterating under OpenMP. */
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* In CMYK, red/green/blue channels hold C/M/Y and the index channel
         holds K; their sum is the per-pixel ink density. */
      density=(double) GetPixelRed(p)+GetPixelGreen(p)+
        GetPixelBlue(p)+GetPixelIndex(indexes+x);
      /* Double-checked maximum: the unsynchronized outer test avoids
         entering the critical section for most pixels; the inner test
         re-validates under the lock before updating the shared maximum. */
      if (density > total_ink_density)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GetImageTotalInkDensity)
#endif
        {
          if (density > total_ink_density)
            total_ink_density=density;
        }
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    total_ink_density=0.0;
  return(total_ink_density);
}
AlgebraicPageRank.h
/*
 * AlgebraicPageRank.h
 *
 * Created on: Jun 20, 2016
 *      Author: Michael Wegner (michael.wegner@student.kit.edu)
 */

#ifndef NETWORKIT_CPP_ALGEBRAIC_ALGORITHMS_ALGEBRAICPAGERANK_H_
#define NETWORKIT_CPP_ALGEBRAIC_ALGORITHMS_ALGEBRAICPAGERANK_H_

#include "../../base/Algorithm.h"
#include "../../auxiliary/Parallel.h"
#include "../../graph/Graph.h"
#include "../GraphBLAS.h"
#include "../Vector.h"

namespace NetworKit {

/**
 * @ingroup algebraic
 * Implementation of PageRank using the GraphBLAS interface.
 */
template<class Matrix>
class AlgebraicPageRank : public Algorithm {
public:

	/**
	 * Constructs an instance of AlgebraicPageRank for the given @a graph. Page rank uses the damping factor @a damp
	 * and the tolerance @a tol.
	 * Builds M = damp * A^T * D^{-1} (column-stochastic transition matrix scaled
	 * by the damping factor), where D is the diagonal out-degree matrix.
	 * @param graph
	 * @param damp
	 * @param tol
	 */
	AlgebraicPageRank(const Graph& graph, const double damp = 0.85, const double tol = 1e-8) : damp(damp), tol(tol) {
		Matrix A = Matrix::adjacencyMatrix(graph);
		// normalize At by out-degree
		Vector invOutDeg = GraphBLAS::rowReduce(A);
		// NOTE(review): a node with row sum 0 (out-degree 0) yields
		// invOutDeg[i] = inf here.  Such a row contributes no non-zeros
		// below, so the inf never enters mTriplets, but dangling nodes
		// consequently redistribute no rank — confirm this is intended.
#pragma omp parallel for
		for (omp_index i = 0; i < static_cast<omp_index>(invOutDeg.getDimension()); ++i) {
			invOutDeg[i] = 1.0/invOutDeg[i];
		}

		// Transpose while scaling: entry (i,j) of A becomes (j,i) of M,
		// weighted by damp / outdeg(i).
		std::vector<Triplet> mTriplets(A.nnz());
		index idx = 0;
		A.forNonZeroElementsInRowOrder([&](index i, index j, double value) {
			mTriplets[idx++] = {j,i, damp * value * invOutDeg[i]};
		});
		M = std::move(Matrix(A.numberOfRows(), mTriplets));
	}

	void run() override;

	/**
	 * Get a vector containing the PageRank score for each node in the graph.
	 * @param moveOut Return the actual internal data instead of a copy. Resets the hasRun-state. Default: false.
	 * @return The PageRank scores calculated by @link run().
	 */
	std::vector<double> scores(bool moveOut = false);

	/**
	 * Get a vector of pairs sorted into descending order. Each pair contains a node and the corresponding score
	 * calculated by @link run().
	 * @return A vector of pairs.
	 */
	std::vector<std::pair<node, double>> ranking();

	/**
	 * Get the PageRank score of node @a v calculated by @link run().
	 *
	 * @param v A node.
	 * @return The PageRank score of node @a v.
	 */
	double score(node v);

	/**
	 * Get the theoretical maximum of centrality score in the given graph.
	 *
	 * @return The maximum centrality score.
	 */
	double maximum() {
		return 1.0;
	}

private:
	Matrix M;            // damped, degree-normalized transposed adjacency matrix
	const double damp;   // damping factor (probability of following a link)
	const double tol;    // L2 convergence tolerance for the power iteration
	std::vector<double> scoreData;
	std::vector<double> edgeScoreData;
};

// Power iteration: rank <- M * rank + (1-damp)/n, until the L2 norm of the
// change drops below tol; the result is then normalized to sum to 1.
template<class Matrix>
void AlgebraicPageRank<Matrix>::run() {
	count n = M.numberOfRows();
	double teleportProb = (1.0 - damp) / (double) n;
	Vector rank(n, 1.0/(double)n);
	Vector lastRank;

	do {
		lastRank = rank;
		rank = M * rank;
		rank.apply([&](double value) {return value += teleportProb;});
	} while ((rank - lastRank).length() > tol);

	double sum = 0.0;
#pragma omp parallel for reduction(+:sum)
	for (omp_index i = 0; i < static_cast<omp_index>(rank.getDimension()); ++i) {
		sum += rank[i];
	}

	scoreData.resize(n, 0);
#pragma omp parallel for
	for (omp_index i = 0; i < static_cast<omp_index>(rank.getDimension()); ++i) {
		scoreData[i] = rank[i] / sum;
	}

	hasRun = true;
}

template<class Matrix>
std::vector<double> AlgebraicPageRank<Matrix>::scores(bool moveOut) {
	if (!hasRun) throw std::runtime_error("Call run method first");
	hasRun = !moveOut;
	return moveOut ? std::move(scoreData) : scoreData;
}

template<class Matrix>
std::vector<std::pair<node, double>> AlgebraicPageRank<Matrix>::ranking() {
	if (!hasRun) throw std::runtime_error("Call run method first");
	std::vector<std::pair<node, double> > ranking;
	for (index i = 0; i < scoreData.size(); ++i) {
		ranking.push_back({i, scoreData[i]});
	}
	// Sort by score, descending.
	Aux::Parallel::sort(ranking.begin(), ranking.end(), [](std::pair<node, double> x, std::pair<node, double> y) { return x.second > y.second; });
	return ranking;
}

template<class Matrix>
double AlgebraicPageRank<Matrix>::score(node v) {
	if (!hasRun) throw std::runtime_error("Call run method first");
	return scoreData.at(v);
}

} /* namespace NetworKit */

#endif /* NETWORKIT_CPP_ALGEBRAIC_ALGORITHMS_ALGEBRAICPAGERANK_H_ */
GB_binop__bget_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bget_int32 // A.*B function (eWiseMult): GB_AemultB__bget_int32 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bget_int32 // C+=b function (dense accum): GB_Cdense_accumb__bget_int32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bget_int32 // C=scalar+B GB_bind1st__bget_int32 // C=scalar+B' GB_bind1st_tran__bget_int32 // C=A+scalar GB_bind2nd__bget_int32 // C=A'+scalar GB_bind2nd_tran__bget_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = GB_BITGET (aij, bij, int32_t, 32) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define 
GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_BITGET (x, y, int32_t, 32) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BGET || GxB_NO_INT32 || GxB_NO_BGET_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bget_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bget_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bget_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__bget_int32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bget_int32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT 
TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bget_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t bij = Bx [p] ; Cx [p] = GB_BITGET (x, bij, int32_t, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bget_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; Cx [p] = GB_BITGET (aij, y, int32_t, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define 
GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = GB_BITGET (x, aij, int32_t, 32) ; \ } GrB_Info GB_bind1st_tran__bget_int32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = GB_BITGET (aij, y, int32_t, 32) ; \ } GrB_Info GB_bind2nd_tran__bget_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
lmp2sqt.c
/*! * \file lmp2sqt.c * \brief * * \author KIM Hyeok (kh), ekh0324@gmail.com * * \internal * Created: 2017- 05- 29 * Revision: none * Compiler: gcc * Organization: Konkuk University * Copyright: Copyright (c) 2017, KIM Hyeok * * This source code is released for free distribution under the terms of the * GNU General Public License as published by the Free Software Foundation. */ #include "lmp2sqt.h" #include "snapshot.h" #include <assert.h> #include<stdio.h> #include<string.h> #include<stdlib.h> #include <unistd.h> #include <stdbool.h> #define M_PI 3.14159265358979323846 #include<math.h> #include"snapshot.h" #define DIM 3 #define flagSelf 1 typedef enum {N_I, N_R} VType; typedef struct { char *vName; void *vPtr; VType vType; int vLen, vStatus; } NameList; #define NameI(x) \ {#x, &x, N_I, sizeof (x) / sizeof (int)} #define NameR(x) \ {#x, &x, N_R, sizeof (x) / sizeof (real)} typedef struct { void *vPtr; VType vType; int vLen; } ValList; #define ValI(x) \ {&x, N_I, sizeof (x) / sizeof (int)} #define ValR(x) \ {&x, N_R, sizeof (x) / sizeof (real)} #define NP_I \ ((int *) (nameList[k].vPtr) + j) #define NP_R \ ((real *) (nameList[k].vPtr) + j) NameList nameList[] = { /*! * \brief system information */ NameR (kVal), NameR (deltaT), NameR (mass), /*! * \brief input parameter for evaluation */ NameR (rVal), NameI (limitCorrAv), NameI (nCBuffer), // number of simul. 
time seq NameI (nCSpatial), // number of spatial seq NameI (nCTime) // number of time seq }; typedef real coordi3[3]; #define OOS2 (0.7071067811865475) #define OOS3 (0.5773502691896258) coordi3 coordi_list[13]= { { +1,+0,+0}, { +0,+1,+0}, { +0,+0,+1}, // basic { +OOS2,+OOS2,+0}, { +OOS2,-OOS2,+0}, { +OOS2,+0,+OOS2}, { +OOS2,+0,-OOS2}, { +0,+OOS2,+OOS2}, { +0,+OOS2,-OOS2}, // 2axis combination { +OOS3,+OOS3,+OOS3}, { +OOS3,+OOS3,-OOS3}, { +OOS3,-OOS3,+OOS3}, { +OOS3,-OOS3,-OOS3} // 3axis combination }; char inputFilename[100]= "in.lmp2sqt"; void UpdateNameList (); void PrintNameList2File (FILE *fp); int GetNameList (int argc, char **argv); int Number_call_Print =0; int flag_global_alloc =0 ; int flag_global_alloc_more =0 ; void PrintSpacetimeCorr_binary ( FILE*); int main(int argc, char** argv) { /*! * \brief main 함수. 설명이 필요없다. * * \param argc * \param argv */ char filename[100]; int n_snap; if(argc <2) { perror("#run inputfilename inputfilename2 ifile3 ifile4..."); return 1; } GetNameList(argc,argv); int opt_num=1; int full_n_snaps=0, full_n_snaps_index=0,progress=-1; bool files_on [argc+3]; long int num_data [argc+3]; limitCorrAv = 0; omp_init_lock(&write_lock); omp_init_lock(&read_lock); for( opt_num = 1; opt_num < argc; opt_num++) { strcpy( filename,argv[opt_num]); FILE* fp = fopen( filename ,"r"); if( fp == NULL) { files_on [opt_num] = false; fprintf(stderr,"Can`t open file (%s)!!\n", filename); continue; } // kVal value have be changed because reciprocal information n_snap = 0; while(1) { bool check = read_dump_OnlyCheck(fp); if (check == false ) break; n_snap++; } // if ( flag_Max_eval) if (n_snap <5){ fprintf(stderr,"The # of snap is too small(<5)\n" "We would not use this file(%s)!!\n", filename); files_on [opt_num] = false; // return 23; } else if ( floor((n_snap - nCTime) /(nCTime/ nCBuffer)) <1) { fprintf(stderr,"The # of snap is too small()\n" "it dont make a infomation\n" "We would not use this file(%s)!!\n", filename); files_on [opt_num] = false; } 
else { num_data [opt_num ] = n_snap; if (nCBuffer < 1) { nCBuffer =1; } limitCorrAv += floor((n_snap - nCTime) /(nCTime/ nCBuffer)); full_n_snaps += n_snap; files_on [opt_num] = true; } fclose (fp); } PrintNameList2File(stderr); UpdateNameList (); int num_files =0; for( opt_num = 1; opt_num < argc; opt_num++) { if (files_on[opt_num] == true) num_files ++; } nthreads = omp_get_max_threads(); nthreads = (nthreads< num_files)? nthreads:num_files; nthreads = 1; real * r_done_works; int * i_done_works; AllocMem(classSqt,nthreads,MakeSqtClass); AllocMem(r_done_works,nthreads,real); AllocMem(i_done_works,nthreads,int); for ( int tID =0; tID < nthreads; tID++) { classSqt[tID].flag_alloc = 0; classSqt[tID].flag_alloc_more = 0; i_done_works[tID]= 0; } //#pragma omp parallel for schedule(dynamic) for( opt_num = 1; opt_num < argc; opt_num++) { if (files_on[opt_num] == false) continue; strcpy( filename,argv[opt_num]); FILE* fp = fopen( filename ,"r"); Snapshot* snap, *firstSnap; int threadID = omp_get_thread_num(); MakeSqtClass* cl_sqt = & classSqt[threadID]; InitSpacetimeCorr(cl_sqt); /*! * \brief Start Calculation. 
*/ omp_set_lock(&read_lock); rewind(fp); firstSnap = read_dump(fp); Init_reciprocal_space(firstSnap); rewind(fp); omp_unset_lock(&read_lock); fprintf(stderr,"FULL_SNAPS = %5d\n",full_n_snaps); for (int ns = 0 ; ns < num_data[opt_num] ; ns++ ) { /* while(1) {} */ omp_set_lock(&read_lock); snap = read_dump(fp); omp_unset_lock(&read_lock); if (snap == NULL) break; cl_sqt->snap = snap; EvalSpacetimeCorr(cl_sqt); free_Snapshot(snap); #pragma omp atomic full_n_snaps_index++; i_done_works[threadID]++; int new_progress =(1000.0*full_n_snaps_index/ full_n_snaps); r_done_works[threadID] = (1000.0*i_done_works[threadID]/ full_n_snaps); /* fprintf(stderr,"FULL_SNAPS = %5d, newprogress %d\n",full_n_snaps_index, * new_progress); */ if (new_progress != progress ) { progress = new_progress; fprintf(stderr, "\r %4.1f%%(", progress*.1); for (int k =0 ; k<nthreads; k++) fprintf(stderr, "-tid:%d:%4.1f%%", k,r_done_works[k]*.1); fprintf(stderr, ")"); // fflush(stderr); } } // opt_num ++; fclose (fp); fprintf(stderr, "\nEnd : file : %s\n", filename); } if ( Number_call_Print ==0 ) { fprintf(stderr, "limit corr = %d, countCorrAv = %d\n", limitCorrAv,countCorrAv); FILE* output = fopen("full_results.info", "w"); limitCorrAv =countCorrAv; PrintSpacetimeCorr(output); fclose(output); } omp_destroy_lock(&write_lock); omp_destroy_lock(&read_lock); return 0; } void AccumSpacetimeCorr (MakeSqtClass* cl_sqt) // __thread_safe__ /*! * \brief 계산된 현재 시간의 SpaceTime correlation을 누적한다. */ { int k, nb, nr, n, nt; TBuf* tBuf = cl_sqt->tBuf; TBuf* pt; for (nb = 0; nb < nCBuffer; nb ++) { if (tBuf[nb].count == nCTime) { omp_set_lock(&write_lock); // check!! 
that data is full // S(q,t), M(q,t) part pt = &tBuf[nb]; for (k = 0; k < AVDOF * nCSpatial; k ++) { for (n = 0; n < nCTime; n ++) { avF_qq2[k][n] += pt->F_qq2[k][n]; avF_s_qq2[k][n] += pt->F_s_qq2[k][n]; avF_d_qq2[k][n] += pt->F_d_qq2[k][n]; } } // Diffuse Part for (nt = 0; nt < nCTime; nt ++) { rrMSDAv[nt] += pt->rrMSD[nt]; rrMQDAv[nt] += pt->rrMQD[nt]; real_tensor_increase_r1_r1(&rrMSR1_R_Av[nt], &pt->rrMSR1_R[nt]); real_tensor_increase_r2_r2(&rrMSR2_VR_Av[nt], &pt->rrMSR2_VR[nt]); for ( nr=0; nr<nCSpatial; nr++) { avDrTable[nr][nt] += pt->DrTable[nr][nt]; } } // buffer nb reset pt->count = 0; ++ countCorrAv; if (countCorrAv == limitCorrAv) { FILE* fout = fopen( "output.binary", "w+"); FILE* fout_text = fopen( "output.txt", "w+"); PrintSpacetimeCorr (fout_text); PrintSpacetimeCorr_binary( fout); fclose(fout); fclose(fout_text); ZeroAvSpacetimeCorr (); //FIXIT 아무의미없음 출력에서 교체합시다. } omp_unset_lock(&write_lock); } // if tBuf[nb].count is full } // for all buffer } void InitSpacetimeCorr (MakeSqtClass* cl_sqt) /*! * \brief 프로그램 초기에 시간 평균을 낼 수 있도록 index를 부여하는 과정 */ { if (cl_sqt->flag_alloc == 0 ) { omp_set_lock(&write_lock); AllocArray(cl_sqt); cl_sqt->flag_alloc = 1; flag_global_alloc =1; AllocMemCheck(); omp_unset_lock(&write_lock); } if (nCBuffer > nCTime) { fputs("Error nCBuffer> nCTime\n", stderr); exit(1); } TBuf* tBuf = cl_sqt->tBuf; for (int nb = 0; nb < nCBuffer; nb ++){ tBuf[nb].count = - nb * nCTime / nCBuffer; tBuf[nb].countDiff = - nb * nCTime / nCBuffer; } } void ZeroAvSpacetimeCorr () /*! 
* \brief 출력 후 또는, 프로그램 시작시 평균 계산을 위한 메모리를 * 0값으로 초기화 */ { int nk,nt, nr; countCorrAv = 0; for (nk = 0; nk < AVDOF * nCSpatial; nk ++) { for (nt = 0; nt < nCTime; nt ++) { avF_qq2[nk][nt] = 0.; avF_s_qq2[nk][nt] = 0.; avF_d_qq2[nk][nt] = 0.; } } for (nt = 0; nt < nCTime; nt ++) { rrMSDAv[nt] = 0.; rrMQDAv[nt] = 0.; real_tensor_zero_r1( &rrMSR1_R_Av[nt] ); real_tensor_zero_r2( &rrMSR2_VR_Av [nt] ) ; rrMSR2_VR_Av_offdig[nt] = 0.; rrMSR2_VR_Av_dig [nt] = 0.; } for (nt = 0; nt < nCTime; nt ++) { for (nr = 0; nr < nCSpatial; nr ++) { avDrTable[nr][nt]= 0.; } } } void EvalOtherInformation () /*! * \brief \f$ F(q,t) \f$를 출력전에 미분해서 data를 뽑아낸다. * */ { // this evaluation yield analysis.c #define Fqt_FIX_q avF_qq2[AVDOF*(nk) +AV_DEN] int nk, n, ppT, pT, cT, nT, nnT; real kVal2 = kVal*kVal, q_sqr; n=0; nnT = n+2; nT = n+1; cT = n; { //Forward O(h^2) for (nk = 0; nk < nCSpatial; nk ++) { valGammaQT[nk][n]= (-(Fqt_FIX_q[nnT]) +4.*(Fqt_FIX_q[nT]) -3.*(Fqt_FIX_q[cT]) )/ (2.0* deltaT*Fqt_FIX_q[cT]); q_sqr = kVal2 * (nk+1)*(nk+1); valDqt [nk][n] = - valGammaQT[nk][n] / q_sqr ; } } for (n = 1; n < nCTime-1; n ++) { /* centerd O(h^2) */ pT = n-1; nT = n+1; cT = n; for (nk = 0; nk < nCSpatial; nk ++) { valGammaQT[nk][n] = ( (Fqt_FIX_q[nT]) -(Fqt_FIX_q[pT]) )/ (2.0* deltaT*Fqt_FIX_q[cT]); q_sqr = kVal2 * (nk+1)*(nk+1); valDqt [nk][n] = - valGammaQT[nk][n] / q_sqr ; } } n= nCTime-1; ppT = n-2; pT = n-1; cT = n; { /* Backward O(h^2) */ for (nk = 0; nk < nCSpatial; nk ++) { valGammaQT[nk][n] = (+(3.*Fqt_FIX_q[cT]) -4.*(Fqt_FIX_q[pT]) +(Fqt_FIX_q[ppT]) )/ (2.0* deltaT*Fqt_FIX_q[cT]); q_sqr = kVal2 * (nk+1)*(nk+1); valDqt [nk][n] = - valGammaQT[nk][n] / q_sqr ; } } } void prePrintProcess () { real scale_factor = 1./(13.*nPtls*countCorrAv); #pragma omp parallel for for (int nr = 0; nr < AVDOF * nCSpatial; nr ++) { for (int nt = 0; nt < nCTime; nt ++){ avF_qq2[nr][nt] *= scale_factor; avF_s_qq2[nr][nt] *= scale_factor; avF_d_qq2[nr][nt] *= 0.5*scale_factor; } } // fac = 1./ ( DIM * 2 * nPtls * 
deltaT * limitCorrAv); /*----------------------------------------------------------------------------- * rrMSDAv -> mean square displacemnt * rrMQDAv -> mean quadropole displacemnt *-----------------------------------------------------------------------------*/ // fac = 1./ ( DIM * 2 * nPtls * deltaT * limitCorrAv); scale_factor = 1./ ( nPtls * countCorrAv); real factor_dig = 1./(3.*countCorrAv * g_Vol); real factor_offdig = 1./(6.*countCorrAv * g_Vol); #pragma omp parallel for for (int nt = 1; nt < nCTime; nt ++) { rrMSDAv[nt] *= scale_factor; rrMQDAv[nt] *= scale_factor; real_tensor_product_r1_r0r1(&rrMSR1_R_Av[nt], scale_factor, &rrMSR1_R_Av[nt]); /*! * \brief if all mass of particles is same value */ real_tensor_product_r2_r0r2(&rrMSR2_VR_Av[nt] , (.5*mass*mass),&rrMSR2_VR_Av[nt]); rrMSR2_VR_Av_dig[nt] = factor_dig* real_tensor_sum_dig_r2(&rrMSR2_VR_Av[nt]); rrMSR2_VR_Av_offdig[nt] = factor_offdig* real_tensor_sum_offdig_r2(&rrMSR2_VR_Av[nt]); for ( int nr=0; nr<nCSpatial; nr++) { avDrTable[nr][nt] *= factorDr[nr]; } } } void PrintSpacetimeCorr (FILE *fp) /*! 
* \brief 결과를 출력하는 함수 * * \param fp output file descriptor */ { prePrintProcess (); extern real kVal; int nType, k2, nr; Number_call_Print ++; // char *header[] = {"cur-long", "cur-trans", "density", "vanHove-self"}; char *header[] = { "full-density" , // 0 "self-density" , // 1 "cross-density" , // 2 "self-vanHove" // 3 }; fprintf (fp, "%s\n",txtCorr); //for (nType = 0; k < 3; k ++) { for (k2 = 0; k2 < sizeof(header)/ sizeof(char*); k2 ++) { /* fprintf (fp, "%s", header[nType]); * for (j = 0; j < nCSpatial; j ++) * fprintf (fp, " %7.3f", kVal*(j+1)); * fprintf (fp, "\n"); */ // EvalOtherInformation (); fprintf (fp, "# %s %7.3f %7.3f %7.3f\n", header[k2] , kVal, 1.0*deltaT, rVal); switch ( k2) { case 0: /*!----------------------------------------------------------------------------- * avF_qq2[AVDOF*i+nType][k] -> F(q_i,t_k) *-----------------------------------------------------------------------------*/ nType= 0; for (int nt = 0; nt < nCTime; nt ++) { /* deltaT = n *1. * deltaT; * fprintf (fp, "%7.3f", deltaT); */ for (int nk = 0; nk < nCSpatial; nk ++){ fprintf (fp, " %8.4e", avF_qq2[AVDOF * nk + nType][nt]); } fprintf (fp, "\n"); } break; /*----------------------------------------------------------------------------- * avF_s_qq2[3*i+nType][j] -> F_s(q_i,t_j) *-----------------------------------------------------------------------------*/ case 1: nType = 0; for (int nt = 0; nt < nCTime; nt ++) { /* deltaT = n *1. * deltaT; * fprintf (fp, "%7.3f", deltaT); */ for (int nr = 0; nr < nCSpatial; nr ++){ fprintf (fp, " %8.4e", avF_s_qq2[AVDOF * nr + nType][nt]); } fprintf (fp, "\n"); } break; /*----------------------------------------------------------------------------- * avF_d_qq2[3*i+nType][j] -> F_d(q_i,t_j) * magnetic *-----------------------------------------------------------------------------*/ case 2: nType = 0; for (int nt = 0; nt < nCTime; nt ++) { /* deltaT = n *1. 
* deltaT; * fprintf (fp, "%7.3f", deltaT); */ for (int nr = 0; nr < nCSpatial; nr ++){ fprintf (fp, " %8.4e", avF_d_qq2[AVDOF * nr + nType][nt]); } fprintf (fp, "\n"); } break; case 3: // fprintf (fp, "#van Hove function\n"); for (int nt = 0; nt < nCTime; nt ++) { for ( nr=0; nr<nCSpatial; nr++) { fprintf (fp, " %8.4e", avDrTable[nr][nt] ); } fprintf (fp, "\n"); } break; } fprintf (fp, "\n"); } void PrintEtc(); PrintEtc (); } void PrintSpacetimeCorr_binary (FILE *fp) /*! * \brief 결과를 출력하는 함수 * * \param fp output file descriptor */ { extern real kVal; int nType, k2, nr; // Number_call_Print ++; // char *header[] = {"cur-long", "cur-trans", "density", "vanHove-self"}; char *header[] = { "full-density" , // 0 "self-density" , // 1 "cross-density" , // 2 "self-vanHove" // 3 }; int nTypes = sizeof(header)/ sizeof(char*); fwrite (txtCorr,sizeof(char),strlen(txtCorr),fp); fwrite (&nTypes,sizeof(int) ,1,fp); // fwrite (fp, "%s\n",txtCorr); //for (nType = 0; k < 3; k ++) { for (k2 = 0; k2 < nTypes; k2 ++) { /* fprintf (fp, "%s", header[nType]); * for (j = 0; j < nCSpatial; j ++) * fprintf (fp, " %7.3f", kVal*(j+1)); * fprintf (fp, "\n"); */ // EvalOtherInformation (); real col2 = kVal; real col3 = 1.0*deltaT; real col4 = rVal; fwrite(header[k2],sizeof(char), strlen(header[k2]), fp); fwrite(&col2, sizeof(real),1,fp); fwrite(&col3, sizeof(real),1,fp); fwrite(&col4, sizeof(real),1,fp); /* fprintf (fp, "# %s %7.3f %7.3f %7.3f\n", header[k2] , kVal, 1.0*deltaT, rVal); */ switch ( k2) { case 0: /*!----------------------------------------------------------------------------- * avF_qq2[AVDOF*i+nType][k] -> F(q_i,t_k) *-----------------------------------------------------------------------------*/ nType= 0; for (int nt = 0; nt < nCTime; nt ++) { /* deltaT = n *1. 
* deltaT; * fprintf (fp, "%7.3f", deltaT); */ for (int nk = 0; nk < nCSpatial; nk ++){ fwrite( &(avF_qq2[AVDOF * nk + nType][nt]), sizeof(real),1,fp); } /* for (int nk = 0; nk < nCSpatial; nk ++){ * fprintf (fp, " %8.4e", avF_qq2[AVDOF * nk + nType][nt]); * } * fprintf (fp, "\n"); */ } break; /*----------------------------------------------------------------------------- * avF_s_qq2[3*i+nType][j] -> F_s(q_i,t_j) *-----------------------------------------------------------------------------*/ case 1: nType = 0; for (int nt = 0; nt < nCTime; nt ++) { /* deltaT = n *1. * deltaT; * fprintf (fp, "%7.3f", deltaT); */ for (int nk = 0; nk < nCSpatial; nk ++){ fwrite( &(avF_s_qq2[AVDOF * nk + nType][nt]), sizeof(real),1,fp); } /* for (int nr = 0; nr < nCSpatial; nr ++){ * fprintf (fp, " %8.4e", avF_s_qq2[AVDOF * nr + nType][nt]); * } * fprintf (fp, "\n"); */ } break; /*----------------------------------------------------------------------------- * avF_d_qq2[3*i+nType][j] -> F_d(q_i,t_j) * magnetic *-----------------------------------------------------------------------------*/ case 2: nType = 0; for (int nt = 0; nt < nCTime; nt ++) { /* deltaT = n *1. 
* deltaT; * fprintf (fp, "%7.3f", deltaT); */ for (int nk = 0; nk < nCSpatial; nk ++){ fwrite( &(avF_d_qq2[AVDOF * nk + nType][nt]), sizeof(real),1,fp); } /* for (int nr = 0; nr < nCSpatial; nr ++){ * fprintf (fp, " %8.4e", avF_d_qq2[AVDOF * nr + nType][nt]); * } * fprintf (fp, "\n"); */ } break; case 3: // fprintf (fp, "#van Hove function\n"); for (int nt = 0; nt < nCTime; nt ++) { for (int nk = 0; nk < nCSpatial; nk ++){ fwrite( &(avDrTable[nr][nt]), sizeof(real),1,fp); } /* for ( nr=0; nr<nCSpatial; nr++) { * fprintf (fp, " %8.4e", avDrTable[nr][nt] ); * } * fprintf (fp, "\n"); */ } break; } /* fprintf (fp, "\n"); */ } /* void PrintEtc(); * PrintEtc (); */ } void PrintEtc () { // char filename1[100] ="Dq00.info" ; // char filename2[100] ="Ft00.info" ; char filename1[100]; char filename2[100]; char filename3[100]; int nfile = 0; sprintf(filename1, "Dt%03d.info",nfile); sprintf(filename2, "vanHove%03d.info",nfile); sprintf(filename3, "SSF%03d.info",nfile); //printf( "access(%s) -> return %d", filename1, access(filename1,F_OK)); // // { // while( 0 == access(filename1,F_OK) ) { // /* fprintf(stderr, "Files are exist at least . 
(%03d) \n", nfile); // * sleep(1); // */ // nfile++; // sprintf(filename1, "Dt%03d.info",nfile); // sprintf(filename2, "vanHove%03d.info",nfile); // sprintf(filename3, "SSF%03d.info",nfile); // } // } /* FILE* fp_Dq = fopen(filename1,"w"); * fprintf (fp_Dq, "# dt = %7.3f\n", deltaT); * for (j = 0; j < nCSpatial; j ++) { * fprintf (fp_Dq, "%8.4f" , j*kVal ); * for (n = 1; n < nCTime; n ++) { * fprintf (fp_Dq, " %8.4e" , valDqt[j][n]); * } * fprintf (fp_Dq, "\n"); * } * fclose(fp_Dq); */ /* FILE* fp_Ft = fopen(filename2,"w"); * fprintf (fp_Ft, "# dq = %7.3e\n", kVal); * for (n = 0; n < nCTime; n ++) { * fprintf (fp_Ft, "%8.4f" , n*deltaT ); * for (j = 0; j < nCSpatial; j ++) { * fprintf (fp_Ft, " %8.4e" , avF_qq2[(3*j)+2][n]/avF_qq2[(3*j)+2][0]); * } * fprintf (fp_Ft, "\n"); * } * fclose(fp_Ft); */ FILE* fp_SSF = fopen(filename3,"w"); for (int nr = 0; nr < nCSpatial; nr ++) { fprintf (fp_SSF, "%8.4f" " %8.4e""\n" , (nr+1)*kVal , // avF_qq2[(AVDOF*nr)+AV_DEN][0]); avF_qq2[(AVDOF*nr)+0][0]); } fclose(fp_SSF); // fprintf (fp_SSF, "# dq = %7.3e\n", kVal); FILE* fp_Dt = fopen(filename1,"w"); fprintf (fp_Dt, "#time MSD msdx msdy msdz D(t) MQD MSVR_dig MSVR_offdig xy yx zy yz xz zx\n"); real fac = 1./( 2.* deltaT * DIM * 2); int nr=0; rrDt[nr] = fac*(-rrMSDAv[nr+2] +4.*rrMSDAv[nr+1] - 3.* rrMSDAv[nr]); for ( nr = 1; nr < nCTime-1; nr += 1 ) { rrDt[nr] = fac*(rrMSDAv[nr+1] -rrMSDAv[nr-1] ); } nr=nCTime-1; rrDt[nr] = fac*(rrMSDAv[nr-2] -4.*rrMSDAv[nr-1] + 3.* rrMSDAv[nr]); for ( int nt = 0; nt < nCTime; nt += 1 ) { real tVal = nt * deltaT; fprintf (fp_Dt, "%8.4f %8.4e %8.4e %8.4e %8.4e %8.4e %8.4e %8.4e %8.4e %8.4e %8.4e %8.4e %8.4e %8.4e %8.4e \n", tVal, rrMSDAv[nt] , rrMSR1_R_Av[nt].x, rrMSR1_R_Av[nt].y, rrMSR1_R_Av[nt].z, rrDt[nt], rrMQDAv[nt], rrMSR2_VR_Av_dig[nt], rrMSR2_VR_Av_offdig[nt] , rrMSR2_VR_Av[nt].xy , rrMSR2_VR_Av[nt].yx , rrMSR2_VR_Av[nt].zy , rrMSR2_VR_Av[nt].yz , rrMSR2_VR_Av[nt].xz , rrMSR2_VR_Av[nt].zx ); } fclose(fp_Dt); 
/*----------------------------------------------------------------------------- * van Hove function part *-----------------------------------------------------------------------------*/ /* fprintf (fp_Gr, "#van Hove function\n"); * FILE* fp_Gr = fopen(filename2,"w"); * * for ( nr=0; nr<nCSpatial; nr++) { * for (j = 0; j < nCTime; j ++) { * fprintf (fp_Gr, " %8.4e", avDrTable[nr][j] ); * } * fprintf (fp_Gr, "\n"); * } * fprintf (fp_Gr, "\n"); * fclose(fp_Gr); */ } void ZeroOneTimeCorr(MakeSqtClass* cl_sqt) { TBuf* tBuf = cl_sqt->tBuf; real * rho_q1 = tBuf->rho_q1 ; real ** rho_s_q1 = tBuf->rho_s_q1 ; real ** rho_d_q1 = tBuf->rho_d_q1; for (int j = 0; j < FDOF * nCSpatial; j ++) { rho_q1[j] = 0.; } if ( flagSelf ) { for (int n=0; n<nPtls; n++) { for (int j = 0; j < FDOF * nCSpatial; j ++) { rho_s_q1[n][j] = 0.; rho_d_q1[n][j] = 0.; } } } real_tensor_zero_r2(&tBuf->sumVR_ct); } void EvalOneTimeSumVR(MakeSqtClass* cl_sqt) { Rank2R3 VR; VecR3 vecr3,vel; atom* col_i; TBuf* tBuf = cl_sqt->tBuf; Snapshot* snap = cl_sqt->snap; for (int n=0; n<nPtls; n++) { col_i = &(snap->atoms[n]); vecr3.x = col_i->x; vecr3.y = col_i->y; vecr3.z = col_i->z; vel.x = col_i->vx; vel.y = col_i->vy; vel.z = col_i->vz; real_tensor_product_r2_r1r1 (& VR, &vel, &vecr3); real_tensor_increase_r2_r2(&tBuf->sumVR_ct, &VR); } } void EvalOneTimeKspace(MakeSqtClass* cl_sqt) { real r[3], v[3],mu[3]; Snapshot* snap = cl_sqt->snap; TBuf* tBuf = cl_sqt->tBuf; real * rho_q1 = tBuf->rho_q1 ; real * rho_s_q1_temp = tBuf->rho_s_q1_temp ; real ** rho_s_q1 = tBuf->rho_s_q1 ; real ** rho_d_q1 = tBuf->rho_d_q1; atom* col_i; /*----------------------------------------------------------------------------- * Direct calculate rho(q) *-----------------------------------------------------------------------------*/ for (int n=0; n<nPtls; n++) { col_i = &(snap->atoms[n]); r[0] = col_i->x; r[1] = col_i->y; r[2] = col_i->z; v[0] = col_i->vx; v[1] = col_i->vy; v[2] = col_i->vz; mu[0] = col_i->mux; mu[1] = col_i->muy; mu[2] = 
col_i->muz;  /* completes the per-particle load of position r[], velocity v[]
              * and dipole mu[] begun above */
    // #pragma omp parallel for
    // For every probe direction k (assumed to index coordi_list -- TODO
    // confirm N_AXIS == 13 in lmp2sqt.h) build cos/sin of m*q.r for all
    // nCSpatial wavevector magnitudes of this particle.
    for (int k = 0; k < N_AXIS; k ++) {
      real b,c,s,c0,c1,s1,c2,s2;
      coordi3* c3=&coordi_list[k];
      for (int m = 0; m < nCSpatial; m ++) {
        // marker: base offset of (direction k, magnitude m) in the flat
        // rho arrays; each entry stores a (cos, sin) pair (DOF stride).
        int marker = (nCSpatial* DOF)*k + DOF*m;
        // Because the time of integer calculation is small,
        // change the code more explicity
#ifndef SLOW_FULL_MATH
        // Fast path: build cos((m+1)b), sin((m+1)b) by recurrence instead
        // of calling cos/sin for every m.
        if (m == 0) {
          // b = q . r  for the first harmonic; c0 = cos(b) seeds the
          // recurrence below.
          b = kVal * ( r[0]*(*c3)[0] + r[1]*(*c3)[1] + r[2]*(*c3)[2]);
          c = cos (b);
          s = sin (b);
          c0 = c;
        }
        else if (m == 1) {
          c1 = c; s1 = s;
          c = 2. * c0 * c1 - 1.; //cos(2x)=2cos(x)^2-1
          s = 2. * c0 * s1; // sin(2x)=2sin(x)cos(x)
        }
        else {
          // Chebyshev-style recurrence (these identities do hold):
          //   cos((m+1)b) = 2 cos(b) cos(mb) - cos((m-1)b)
          //   sin((m+1)b) = 2 cos(b) sin(mb) - sin((m-1)b)
          c2 = c1; s2 = s1; c1 = c; s1 = s;
          c = 2. * c0 * c1 - c2; // Check true!!
          s = 2. * c0 * s1 - s2; // Check true!!
        }
#else
        // Reference path: direct evaluation (note: uses r[k], so it is only
        // meaningful for the first three axis-aligned directions).
        b = kVal * r[k];
        c = cos( (m+1)*b);
        s = sin( (m+1)*b);
#endif
        rho_s_q1_temp[marker +0] = c;
        rho_s_q1_temp[marker +1] = s;
      }// loop spatial slice
    } // for DIMEN
    // memcpy(rho_s_q1, rho_q1,sizeof(real)*24*nCSpatial);
    // Accumulate this particle's contribution into the collective density
    // rho(q); keep a per-particle copy for the self/distinct split.
    for(int nk=0; nk< FDOF * nCSpatial; nk++ ) {
      rho_q1 [ nk] += rho_s_q1_temp[nk];
    }
    if ( flagSelf ) {
      memcpy(rho_s_q1[n], rho_s_q1_temp, sizeof(real)*FDOF*nCSpatial);
    }
  } /* for loop : n<nPtls */
  // Distinct part: rho_d = rho_total - rho_self for every particle.
  if ( flagSelf ) {
    for(int nk=0; nk< FDOF * nCSpatial; nk++ ) {
      for (int n=0; n<nPtls; n++) {
        rho_d_q1[n] [ nk] = rho_q1[nk] - rho_s_q1 [n][nk];
      }
    }
  }
}

void EvalOneTimeCorr(MakeSqtClass* cl_sqt)
/*!
 *
 * \brief one time Correlation을 계산한다.
* q space value는 x, y, z 방향 세개의 방향으로 * longitudinal version, translational version과 가장 기본적인 방향성분 없는 density */ { void ZeroOneTimeCorr(MakeSqtClass* cl_sqt); void EvalOneTimeSumVR(MakeSqtClass* cl_sqt) ; void EvalOneTimeKspace(MakeSqtClass* cl_sqt); ZeroOneTimeCorr(cl_sqt); EvalOneTimeSumVR(cl_sqt); EvalOneTimeKspace(cl_sqt); } void SetWaitedTimeCorr(MakeSqtClass* cl_sqt, TBuf* tBuf_tw) { /*----------------------------------------------- * t_w information *-----------------------------------------------*/ TBuf* tBuf = cl_sqt->tBuf; Snapshot* snap = cl_sqt->snap; real * rho_q1 = tBuf->rho_q1 ; real ** rho_s_q1 = tBuf->rho_s_q1 ; real ** rho_d_q1 = tBuf->rho_d_q1; real_tensor_copy_r2r2(& tBuf_tw->orgSumVR, &tBuf->sumVR_ct); for (int n=0; n<nPtls; n++) { tBuf_tw->orgR[n].x = snap->atoms[n].x; tBuf_tw->orgR[n].y = snap->atoms[n].y; tBuf_tw->orgR[n].z = snap->atoms[n].z; } for (int j = 0; j < FDOF * nCSpatial; j ++){ tBuf_tw->org_rho_q1[j] = rho_q1[j]; if ( flagSelf ) { for (int n=0; n<nPtls; n++) { tBuf_tw->org_rho_s_q1[n][j] = rho_s_q1[n][j]; tBuf_tw->org_rho_d_q1[n][j] = rho_d_q1[n][j]; } // for n } // if flagSelf } // for j } void InitTwoTimeCorr (MakeSqtClass* cl_sqt, TBuf* tBuf_tw, int subtime) { /*------------------------------ * Zero initializing *-----------------------------*/ tBuf_tw->rrMSD[subtime]= 0.; tBuf_tw->rrMQD[subtime]= 0.; real_tensor_zero_r1 (&tBuf_tw->rrMSR1_R[subtime]); real_tensor_zero_r2 (&tBuf_tw->rrMSR2_VR[subtime]); for (int nr=0; nr<nCSpatial; nr++) { tBuf_tw->DrTable[nr][subtime] =0; } //F_qq2 0 KSpace for (int k = 0; k < AVDOF * nCSpatial; k ++) { tBuf_tw->F_qq2[k][subtime] = 0.; tBuf_tw->F_s_qq2[k][subtime] = 0.; tBuf_tw->F_d_qq2[k][subtime] = 0.; } } void EvalTwoTimeEach(MakeSqtClass* cl_sqt, TBuf* tBuf_tw, int subtime) { Snapshot* snap = cl_sqt->snap; for (int n=0; n<nPtls; n++) { VecR3 dr; real dx2,dy2,dz2,dr2; atom* col_i = &(snap->atoms[n]); dr.x = col_i->x-tBuf_tw->orgR[n].x ; dr.y = col_i->y-tBuf_tw->orgR[n].y ; dr.z = 
col_i->z-tBuf_tw->orgR[n].z ; dx2 = dr.x*dr.x; dy2 = dr.y*dr.y; dz2 = dr.z*dr.z; dr2 = dx2 + dy2 + dz2; int i_Dr = floor (sqrt(dr2)/rVal); if (i_Dr<nCSpatial) tBuf_tw->DrTable[i_Dr][subtime] ++; tBuf_tw->rrMSD[subtime] += dr2; tBuf_tw->rrMSR1_R[subtime].x += dx2; tBuf_tw->rrMSR1_R[subtime].y += dy2; tBuf_tw->rrMSR1_R[subtime].z += dz2; tBuf_tw->rrMQD[subtime] += dr2*dr2; } } void EvalTwoTimeCollective(MakeSqtClass* cl_sqt, TBuf* tBuf_tw, int subtime) { Rank2R3 subVR,sqVR; TBuf* tBuf = cl_sqt->tBuf; real_tensor_sub_r2_r2r2(&subVR, &tBuf->sumVR_ct, &tBuf_tw->orgSumVR); real_tensor_product_r2_r2r2 (& sqVR, & subVR, & subVR); real_tensor_increase_r2_r2(&tBuf_tw->rrMSR2_VR[subtime], &sqVR); } void EvalTwoTimeKSpace(MakeSqtClass* cl_sqt, TBuf* tBuf_tw, int subtime) { TBuf* tBuf = cl_sqt->tBuf; real * rho_q1 = tBuf->rho_q1 ; real ** rho_s_q1 = tBuf->rho_s_q1 ; real ** rho_d_q1 = tBuf->rho_d_q1; for (int axis_b = 0; axis_b < N_AXIS; axis_b ++) { // 3 loop #pragma omp parallel for for (int nk = 0; nk < nCSpatial; nk ++) { const int avMarker = nk*AVDOF; const int marker = (nCSpatial* DOF)*axis_b + DOF*nk; int nc= 0; // for (int nc = 0; nc < 7; nc ++) { //DOF/2 = 7 int nav; real w; int ncos = marker + 2*nc; int nsin = marker + 2*nc +1; /* if (nc < 3) { * int axis_a = nc; * if (axis_a == axis_b) { * w = 1.0; * nav = avMarker +V_LONG ; * } * else { * w = 0.5; // * nav = avMarker +V_TRANS ; * } * // else w *= 0.5; * } * else if (nc<6) { * int axis_a = nc -3; * // w = Sqr (kVal * (m + 1)); * if (axis_a == axis_b) { // longitudinal * w = 1.0; * nav = avMarker + M_LONG; * } * else { //trasverse * w = 0.5; // * nav = avMarker +M_TRANS ; * } * // else w *= 0.5; * } * else if (nc==6){ * w = 1.; * nav = avMarker + AV_DEN; * }; // density 3*m+4 */ w = 1.; nav = avMarker; // cos(q*r(t)) cos(q*r(t_w) +sin sin if (flagSelf ) { for (int n=0; n<nPtls; n++) { tBuf_tw->F_s_qq2[nav][subtime] += w * (rho_s_q1[n][ncos] * tBuf_tw->org_rho_s_q1[n][ncos] + rho_s_q1[n][nsin] * 
tBuf_tw->org_rho_s_q1[n][nsin]); tBuf_tw->F_d_qq2[nav][subtime] += w * (rho_d_q1[n][ncos] * tBuf_tw->org_rho_s_q1[n][ncos] + rho_d_q1[n][nsin] * tBuf_tw->org_rho_s_q1[n][nsin])+ w * (rho_s_q1[n][ncos] * tBuf_tw->org_rho_d_q1[n][ncos] + rho_s_q1[n][nsin] * tBuf_tw->org_rho_d_q1[n][nsin]); } } tBuf_tw->F_qq2[nav][subtime] += w * (rho_q1[ncos] * tBuf_tw->org_rho_q1[ncos] + rho_q1[nsin] * tBuf_tw->org_rho_q1[nsin]); // } // for nc, } // for nk , } } void EvalTwoTimeCorr(MakeSqtClass* cl_sqt, TBuf* tBuf_tw, int subtime) { InitTwoTimeCorr(cl_sqt, tBuf_tw, subtime); EvalTwoTimeEach(cl_sqt, tBuf_tw, subtime); EvalTwoTimeCollective(cl_sqt, tBuf_tw, subtime); EvalTwoTimeKSpace(cl_sqt, tBuf_tw, subtime); } void EvalSpacetimeCorr(MakeSqtClass* cl_sqt) /*! * * \brief space time correlation을 계산한다. * q space value는 x, y, z 방향 세개의 방향으로 * longitudinal version, translational version과 가장 기본적인 방향성분 없는 density * version 3개를 구함. * PREV. $M_T(q,t)$ $M_L(q,t)$ * Todo. * \param Snapshot* Snapshot 포인터 */ { extern real kVal; void EvalOneTimeCorr(MakeSqtClass* cl_sqt); TBuf* tBuf = cl_sqt->tBuf; Snapshot* snap = cl_sqt->snap; L = snap->box.xhigh- snap->box.xlow; g_Vol = L*L*L; nPtls = snap->n_atoms; if (cl_sqt->flag_alloc_more ==0 ) { omp_set_lock(&write_lock); Alloc_more(cl_sqt); cl_sqt->flag_alloc_more =1; flag_global_alloc_more = 1; AllocMemCheck (); omp_unset_lock(&write_lock); } kVal = 2.*M_PI / L; EvalOneTimeCorr(cl_sqt); // End Calculate Current time value // Begin Two time corrlation function for (int nb = 0; nb < nCBuffer; nb ++) { if (tBuf[nb].count == 0) { SetWaitedTimeCorr(cl_sqt, &tBuf[nb]); } // End buffer count ==0 if (tBuf[nb].count >= 0) { EvalTwoTimeCorr(cl_sqt,&tBuf[nb],tBuf[nb].count); } // End buffer count >=0 ++ tBuf[nb].count; } AccumSpacetimeCorr (cl_sqt); } void AllocMemCheck () { if (ErrorAllocMem == 1) { printf("Reserving memory Error!!!!!!\n"); exit(1); } } void AllocArray (MakeSqtClass* cl_sqt) /*! * \brief 이름그대로 memory 할다함. * rho_q1 functions of q * 등. 
*/ { int nb; if (flag_global_alloc ==0 ) { AllocMem2 (avF_s_qq2, AVDOF * nCSpatial, nCTime, real); AllocMem2 (avF_d_qq2, AVDOF * nCSpatial, nCTime, real); AllocMem2 (avF_qq2, AVDOF * nCSpatial, nCTime, real); AllocMem2 (valDqt, nCSpatial, nCTime, real); AllocMem2 (valGammaQT, nCSpatial, nCTime, real); } AllocMem (cl_sqt->tBuf, nCBuffer, TBuf); TBuf* tBuf = cl_sqt->tBuf; AllocMem (tBuf->rho_q1, FDOF * nCSpatial, real); for (nb = 0; nb < nCBuffer; nb ++) { AllocMem (tBuf[nb].org_rho_q1, FDOF * nCSpatial, real); AllocMem2 (tBuf[nb].F_s_qq2, AVDOF * nCSpatial, nCTime, real); AllocMem2 (tBuf[nb].F_d_qq2, AVDOF * nCSpatial, nCTime, real); AllocMem2 (tBuf[nb].F_qq2, AVDOF * nCSpatial, nCTime, real); } /*! * \brief Memory for Green-Kubo formula */ // AllocArray for Diffuse () if (flag_global_alloc ==0 ) { AllocMem (rrMSDAv, nCTime, real); AllocMem (rrMSR1_R_Av , nCTime, VecR3); AllocMem (rrMQDAv, nCTime, real); // AllocArray for shear viscosity // (diffusion of momentum) AllocMem (rrMSR2_VR_Av, nCTime, Rank2R3); AllocMem (rrMSR2_VR_Av_dig, nCTime, real); AllocMem (rrMSR2_VR_Av_offdig, nCTime, real); AllocMem (rrDt, nCTime, real); AllocMem2 (avDrTable, nCSpatial,nCTime, real); ZeroAvSpacetimeCorr(); } fprintf(stderr, "Reserving memory on heap via AllocMem : %lld GB\n", ll_mem_size/1000ll/1000ll/1000ll); } void Alloc_more (MakeSqtClass* cl_sqt) { /*! 
* \brief Alloc_more * Allocing using nPtls is post-process * */ int nb,nr; real rho0, shell_Vol; TBuf* tBuf = cl_sqt->tBuf; AllocMem (tBuf->rho_s_q1_temp, FDOF * nCSpatial, real); if (flagSelf ) { AllocMem (tBuf->rho_s_q1, nPtls, real*); AllocMem (tBuf->rho_d_q1, nPtls, real*); for (int natom=0; natom <nPtls ; natom++) { AllocMem (tBuf->rho_s_q1[natom], FDOF * nCSpatial, real); AllocMem (tBuf->rho_d_q1[natom], FDOF * nCSpatial, real); } fprintf(stderr, "Reserving memory on heap via AllocMem : %lld GB\n", ll_mem_size/1000ll/1000ll/1000ll); } for (nb = 0; nb < nCBuffer; nb ++) { AllocMem (tBuf[nb].orgR, nPtls, VecR3); AllocMem (tBuf[nb].rrMSD, nCTime, real); AllocMem (tBuf[nb].rrMQD, nCTime, real); AllocMem (tBuf[nb].rrMSR1_R, nCTime, VecR3); AllocMem (tBuf[nb].rrMSR2_VR, nCTime, Rank2R3); AllocMem2 (tBuf[nb].DrTable, nCSpatial,nCTime, int); if (flagSelf) { AllocMem (tBuf[nb].org_rho_s_q1, nPtls, real*); AllocMem (tBuf[nb].org_rho_d_q1, nPtls, real*); for (int natom=0; natom <nPtls ; natom++) { AllocMem (tBuf[nb].org_rho_s_q1[natom], FDOF * nCSpatial, real); AllocMem (tBuf[nb].org_rho_d_q1[natom], FDOF * nCSpatial, real); } } } fprintf(stderr, "Reserving memory on heap via AllocMem : %lld GB\n", ll_mem_size/1000ll/1000ll/1000ll); if (flag_global_alloc_more ==0 ) { AllocMem (factorDr, nCSpatial, real); AllocMem (radius, nCSpatial, real); rho0 = nPtls/g_Vol; for (nr = 0; nr < nCSpatial; nr ++) { if (nr ==0) { shell_Vol = 4*M_PI /3. * pow(rVal,3); } else{ shell_Vol = (4./3.)*M_PI * ( pow( (nr+1)*rVal,3)-pow(nr*rVal,3)) ; } // else shell_Vol = 4*M_PI * pow(rVal,3)* (nr*nr + 1./12.); // else 부분 확실히 해야함 최근에 다룬적 있음. 
radius [nr] = (nr+.5) * rVal; factorDr[nr] = 1./( pow(rho0,2) * g_Vol *shell_Vol*limitCorrAv); /* printf("rho0=%.2e, Vol=%.2e, shell_Vol=%.2e, factorDr=%.2e\n", * rho0,g_Vol,shell_Vol,factorDr[nr]); */ } } // if flag_global_alloc_more fprintf(stderr, "Reserving memory on heap via AllocMem : %lld GB\n", ll_mem_size/1000ll/1000ll/1000ll); } int GetNameList (int argc, char **argv) /*! * \brief from book of rapaport * ex) input.file * Name value * Name2 value * value type : real(double type) int * Well defined. */ { int j, k, match, ok; char buff[100], *token; FILE *fp; strcpy (buff, inputFilename); // strcpy (buff, argv[0]); // strcat (buff, ".in"); if ((fp = fopen (buff, "r")) == 0) { fp = fopen(buff, "w"); for (k = 0; k < sizeof (nameList) / sizeof (NameList); k ++) { fprintf (fp, "%s\t", nameList[k].vName); if (strlen (nameList[k].vName) < 8) fprintf (fp, "\t"); for (j = 0; j < nameList[k].vLen; j ++) { switch (nameList[k].vType) { case N_I: fprintf (fp, "%d ", 0); // fprintf (fp, "%d ", *NP_I); break; case N_R: fprintf (fp, "%#g ", 0.00); // fprintf (fp, "%#g ", *NP_R); break; } fprintf (fp, "\n"); } } fprintf (fp, "----\n"); fclose(fp); printf("GetDataError\n"); exit (1); } for (k = 0; k < sizeof (nameList) / sizeof (NameList); k ++) nameList[k].vStatus = 0; ok = 1; while (1) { fgets (buff, 80, fp); if (feof (fp)) break; token = strtok (buff, " \t\n"); if (! token) break; match = 0; for (k = 0; k < sizeof (nameList) / sizeof (NameList); k ++) { if (strcmp (token, nameList[k].vName) == 0) { match = 1; if (nameList[k].vStatus == 0) { nameList[k].vStatus = 1; for (j = 0; j < nameList[k].vLen; j ++) { token = strtok (NULL, ", \t\n"); if (token) { switch (nameList[k].vType) { case N_I: *NP_I = atol (token); break; case N_R: *NP_R = atof (token); break; } } else { nameList[k].vStatus = 2; ok = 0; } } token = strtok (NULL, ", \t\n"); if (token) { nameList[k].vStatus = 3; ok = 0; } break; } else { nameList[k].vStatus = 4; ok = 0; } } } if (! 
match) ok = 0; } fclose (fp); if(nCBuffer > nCTime ) nCBuffer = nCTime; for (k = 0; k < sizeof (nameList) / sizeof (NameList); k ++) { if (nameList[k].vStatus != 1) ok = 0; } return (ok); } void UpdateNameList () /*! * \brief 초기값을 출력하는 함수 getNameList의 짝함수이다. * * \param fp FILE* file descriptor */ { char buff[100]; FILE* fp; strcpy (buff, inputFilename); fp = fopen( buff, "w"); PrintNameList2File(fp); fclose(fp); } void PrintNameList2File (FILE *fp) /*! * \brief 초기값을 출력하는 함수 getNameList의 짝함수이다. * * \param fp FILE* file descriptor */ { int j, k; fprintf (fp, "NameList -- data\n"); for (k = 0; k < sizeof (nameList) / sizeof (NameList); k ++) { fprintf (fp, "%s\t", nameList[k].vName); if (strlen (nameList[k].vName) < 8) fprintf (fp, "\t"); if (nameList[k].vStatus > 0) { for (j = 0; j < nameList[k].vLen; j ++) { switch (nameList[k].vType) { case N_I: fprintf (fp, "%d ", *NP_I); break; case N_R: fprintf (fp, "%#g ", *NP_R); break; } } } switch (nameList[k].vStatus) { case 0: fprintf (fp, "** no data"); break; case 1: break; case 2: fprintf (fp, "** missing data"); break; case 3: fprintf (fp, "** extra data"); break; case 4: fprintf (fp, "** multiply defined"); break; } fprintf (fp, "\n"); } fprintf (fp, "----\n"); } void Init_reciprocal_space(Snapshot * snap) { /*! * * 고로 원래 목적과 달리 delta_k를 2pi/L * n(정수)로 맞추도록 한다. * \param snap Snaptshot* 스냅샷 포인터 */ extern real kVal; real new_dk; int n_mul; real L[3]; // zero initalize current time value // we assume L0=L1 = L2 L[0] = snap->box.xhigh- snap->box.xlow; L[1] = snap->box.yhigh- snap->box.ylow; L[2] = snap->box.zhigh- snap->box.zlow; /* for (k = 0; k < sizeof (nameList) / sizeof (NameList); k ++) { * if ( strcmp(vName, nameList[k].vName)== 0 ) { * j=0; * p_kVal = NP_R; * } * } * printf( "kVal %p kValp %p\n", &kVal, p_kVal); */ n_mul = round(kVal/ (2.*M_PI/ L[0] )); if (n_mul <=0) n_mul =1; new_dk = (2.*M_PI/L[0]) * n_mul; fprintf(stderr, "Update for input dk param: %f -> %f \n" , kVal, new_dk); kVal = new_dk; }
omp_smithW-v1-refinedOrig.c
/*********************************************************************************
 * Smith-Waterman algorithm
 * Purpose:     Local alignment of nucleotide or protein sequences
 * Authors:     Daniel Holanda, Hanoch Griner, Taynara Pinheiro
 *
 * Compilation: g++ omp_smithW.c -o omp_smithW -fopenmp -DDEBUG     // debugging mode
 *              g++ omp_smithW.c -O3 -DNDEBUG=1 -o omp_smithW -fopenmp // production run
 * Execution:   ./omp_smithW <number_of_col> <number_of_rows>
 *
 * Updated by C. Liao, Jan 2nd, 2019
 *********************************************************************************/

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <time.h>
#include <assert.h>
#include <chrono>
#include <stdbool.h> // C99 does not support the boolean data type

/*--------------------------------------------------------------------
 * Text Tweaks
 */
#define RESET   "\033[0m"
#define BOLDRED "\033[1m\033[31m" /* Bold Red */
/* End of text tweaks */

/*--------------------------------------------------------------------
 * Constants
 */
#define PATH -1
#define NONE 0
#define UP 1
#define LEFT 2
#define DIAGONAL 3
/* End of constants */

/*--------------------------------------------------------------------
 * Helpers
 */
#define min(x, y) (((x) < (y)) ? (x) : (y))
/* FIX: fully parenthesized.  The original expanded to `... ? a : b`,
 * which mis-binds when the macro is used inside a larger expression. */
#define max(a, b) (((a) > (b)) ? (a) : (b))

#ifndef _OPENMP
#include <sys/time.h>
/* Wall-clock fallback so omp_get_wtime() exists without OpenMP. */
double time_stamp()
{
  struct timeval t;
  double time;
  gettimeofday(&t, NULL);
  time = t.tv_sec + 1.0e-6 * t.tv_usec;
  return time;
}
double omp_get_wtime()
{
  return time_stamp();
}
#endif

// #define DEBUG
/* End of Helpers */

/*--------------------------------------------------------------------
 * Functions Prototypes
 */
void similarityScore(long long int i, long long int j, int* H, int* P,
                     long long int* maxPos);
int matchMissmatchScore(long long int i, long long int j);
int backtrack(int* P, long long int maxPos);
void printMatrix(int* matrix);
void printPredecessorMatrix(int* matrix);
void generate(void);
long long int nElement(long long int i);
void calcFirstDiagElement(long long int i, long long int *si, long long int *sj);
/* End of prototypes */

/*--------------------------------------------------------------------
 * Global Variables
 */
bool useBuiltInData = true;

// Defines size of strings to be compared
long long int m = 8; // Columns - Size of string a
long long int n = 9; // Lines   - Size of string b
// the generated scoring matrix's size is m++ and n++ later to have the
// first row/column as 0s.

// Defines scores
int matchScore     =  3;
int missmatchScore = -3;
int gapScore       = -2;

// Strings over the Alphabet Sigma
char *a, *b;
/* End of global variables */

/*--------------------------------------------------------------------
 * Function:    main
 * Purpose:     Drive the wavefront (anti-diagonal) computation of the
 *              scoring matrix H and its predecessor matrix P, then
 *              backtrack from the best cell.
 */
int main(int argc, char* argv[])
{
  typedef std::chrono::time_point<std::chrono::system_clock> time_point;

  int thread_count; // only meaningful when compiled with OpenMP

  if (argc == 3) {
    m = strtoll(argv[1], NULL, 10);
    n = strtoll(argv[2], NULL, 10);
    useBuiltInData = false;
  }

  if (useBuiltInData)
    printf("Using built-in data for testing ..\n");
  printf("Problem size: Matrix[%lld][%lld]\n", n, m);

  // Allocates a and b (sized BEFORE the ++ below: the sequences themselves)
  a = (char*) malloc(m * sizeof(char));
  b = (char*) malloc(n * sizeof(char));

  // Because now we have zeros (padding row/column)
  m++;
  n++;

  // Allocates similarity matrix H and predecessor matrix P, zero-filled
  int *H = (int*) calloc(m * n, sizeof(int));
  int *P = (int*) calloc(m * n, sizeof(int));

  unsigned long long sz = (m + n + 2 * m * n) * sizeof(int) / 1024 / 1024;
  if (sz >= 1024)
    printf("Total memory footprint is:%llu GB\n", sz / 1024);
  else
    printf("Total memory footprint is:%llu MB\n", sz);

  if (useBuiltInData) {
    // https://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm#Example
    // Using the wiki example to verify the results
    b[0] = 'G'; b[1] = 'G'; b[2] = 'T'; b[3] = 'T'; b[4] = 'G';
    b[5] = 'A'; b[6] = 'C'; b[7] = 'T'; b[8] = 'A';

    a[0] = 'T'; a[1] = 'G'; a[2] = 'T'; a[3] = 'T';
    a[4] = 'A'; a[5] = 'C'; a[6] = 'G'; a[7] = 'G';
  } else {
    // Gen random arrays a and b
    generate();
  }

  // Start position for backtrack
  long long int maxPos = 0;

  long long int i, j;

  // The way to generate all wavefront is to go through the top edge elements
  // starting from the left top of the matrix, go to the bottom top -> down,
  // then left->right; total top edge element count = dim1_size + dim2_size - 1
  // Because now we have zeros: ((m-1) + (n-1) - 1)
  long long int nDiag = m + n - 3;

#ifdef DEBUG
  // FIX: nDiag is long long; %d was undefined behavior
  printf("nDiag=%lld\n", nDiag);
  printf("Number of wavefront lines and their first element positions:\n");
#endif

#ifdef _OPENMP
#pragma omp parallel
  {
#pragma omp master
    {
      thread_count = omp_get_num_threads();
      printf("Using %d out of max %d threads...", thread_count,
             omp_get_max_threads());
    }
  }
#endif

  // Gets Initial time
  time_point starttime = std::chrono::system_clock::now();

#pragma omp parallel default(none) shared(H, P, maxPos, nDiag, j) private(i)
  {
    for (i = 1; i <= nDiag; ++i) // start from 1 since 0 is the boundary padding
    {
      long long int nEle, si, sj;
      nEle = nElement(i);
      calcFirstDiagElement(i, &si, &sj);
#pragma omp for private(j)
      for (j = 0; j < nEle; ++j) {
        // going upwards: anti-diagonal direction
        long long int ai = si - j; // going up vertically
        long long int aj = sj + j; // going right in horizontal
        similarityScore(ai, aj, H, P, &maxPos); // a critical section is used inside
      }
    }
  }

  int len = backtrack(P, maxPos);

  time_point endtime = std::chrono::system_clock::now();
  int elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
                    endtime - starttime).count();
  printf("\nElapsed time: %d ms   Path length: %d \n\n", elapsed, len);

  if (useBuiltInData) {
    printf("Verifying results using the builtinIn data: %s\n",
           (H[n * m - 1] == 7) ? "true" : "false");
    assert(H[n * m - 1] == 7);
  }

#ifdef DEBUG
  printf("\nSimilarity Matrix:\n");
  printMatrix(H);

  printf("\nPredecessor Matrix:\n");
  printPredecessorMatrix(P);
#endif

  // FIX: release heap memory (the frees were commented out in the original)
  free(H);
  free(P);
  free(a);
  free(b);

  return 0;
} /* End of main */

/*--------------------------------------------------------------------
 * Function:    nElement
 * Purpose:     Calculate the number of i-diagonal's elements.
 *              i value range 1 to nDiag; we include the upper bound value.
 *              0 is for the padded wavefront, which is ignored.
 */
long long int nElement(long long int i)
{
  if (i < m && i < n) { // smaller than both directions
    // Number of elements in the diagonal is increasing
    return i;
  } else if (i < max(m, n)) { // smaller than only one direction
    // Number of elements in the diagonal is stable;
    // the longer direction has the edge elements, the count is the
    // smaller direction's size.
    // (renamed from `min`, which confusingly shadowed the min() macro)
    long int shorter = min(m, n);
    return shorter - 1;
  } else {
    // Number of elements in the diagonal is decreasing
    long int shorter = min(m, n);
    return 2 * shorter - i + llabs(m - n) - 2;
  }
}

/*--------------------------------------------------------------------
 * Function:    calcFirstDiagElement
 *              Expects valid i value from 1 to nDiag, since index 0 is padding.
 * Purpose:     Calculate the position (si, sj) of the first element of
 *              diagonal i.  n rows, m columns: we sweep the matrix on the
 *              left edge then bottom edge to get the wavefront.
 */
void calcFirstDiagElement(long long int i, long long int *si, long long int *sj)
{
  // Calculate the first element of diagonal
  if (i < n) { // smaller than row count
    *si = i;
    *sj = 1; // start from j==1 since j==0 is the padding
  } else {   // now we sweep horizontally at the bottom of the matrix
    *si = n - 1;     // i is fixed
    *sj = i - n + 2; // j position is (i - n) + 1 + 1  // first +1
  }
}

/*
 // understanding the calculation by an example
 n = 6 // row
 m = 2 // col

 padded scoring matrix: n=7, m=3
   0 1 2
  -------
 0 x x x
 1 x x x
 2 x x x
 3 x x x
 4 x x x
 5 x x x
 6 x x x

 We should peel off the top row and left column since they are padding.
 The remaining 6x2 sub-matrix is what is interesting for us.

 Number of wavefront lines: (n-1) + (m-1) - 1  // submatrix row+column-1
 We use the left-most element in each wavefront line as its first element,
 so the first elements are (1,1), (2,1), (3,1) .. (6,1), (6,2).
 */

/*--------------------------------------------------------------------
 * Function:    similarityScore
 * Purpose:     Calculate the scoring matrix element H(i,j): the maximum
 *              Similarity-Score.  P is the predecessor array, storing
 *              which of the three neighbours supplied the max value.
 */
void similarityScore(long long int i, long long int j, int* H, int* P,
                     long long int* maxPos)
{
  int up, left, diag;

  // Stores index of element
  long long int index = m * i + j;

  // Get element above
  up = H[index - m] + gapScore;

  // Get element on the left
  left = H[index - 1] + gapScore;

  // Get element on the diagonal
  diag = H[index - m - 1] + matchMissmatchScore(i, j);

  // Calculates the maximum
  int max  = NONE;
  int pred = NONE;
  /* === Matrix ===
   *      a[0] ... a[n]
   * b[0]
   * ...
   * b[n]
   *
   * generate 'a' from 'b', if '←' insert e '↑' remove
   * a=GAATTCA
   * b=GACTT-A
   *
   * generate 'b' from 'a', if '←' insert e '↑' remove
   * b=GACTT-A
   * a=GAATTCA
   */
  if (diag > max) { // same letter ↖
    max  = diag;
    pred = DIAGONAL;
  }
  if (up > max) {   // remove letter ↑
    max  = up;
    pred = UP;
  }
  if (left > max) { // insert letter ←
    max  = left;
    pred = LEFT;
  }
  // Inserts the value in the similarity and predecessor matrixes
  H[index] = max;
  P[index] = pred;

  // Updates maximum score to be used as seed on backtrack.
  // FIX: the comparison must be inside the critical section too.  In the
  // original only the store was protected, so two threads could both pass
  // the `max > H[*maxPos]` test and the smaller score could win the race.
#pragma omp critical
  {
    if (max > H[*maxPos]) {
      *maxPos = index;
    }
  }
} /* End of similarityScore */

/*--------------------------------------------------------------------
 * Function:    matchMissmatchScore
 * Purpose:     Similarity function on the alphabet for match/missmatch
 */
int matchMissmatchScore(long long int i, long long int j)
{
  if (a[j - 1] == b[i - 1])
    return matchScore;
  else
    return missmatchScore;
} /* End of matchMissmatchScore */

/*--------------------------------------------------------------------
 * Function:    backtrack
 * Purpose:     Modify matrix to print; path cells change from value to PATH.
 *              Returns the path length.
 */
int backtrack(int* P, long long int maxPos)
{
  // FIX: initialized — the original left predPos indeterminate, which is
  // read in `maxPos = predPos` when P[maxPos] is NONE on the first pass.
  long long int predPos = 0;
  int len = 0;

  // backtrack from maxPos to startPos = 0
  do {
    switch (P[maxPos]) {
      case DIAGONAL: predPos = maxPos - m - 1; break;
      case UP:       predPos = maxPos - m;     break;
      case LEFT:     predPos = maxPos - 1;     break;
      default:                                 break;
    }
#ifdef DEBUG
    P[maxPos] *= PATH;
#endif
    maxPos = predPos;
    ++len;
  } while (P[maxPos] != NONE);
  return len;
} /* End of backtrack */

/*--------------------------------------------------------------------
 * Function:    printMatrix
 * Purpose:     Print Matrix
 */
void printMatrix(int* matrix)
{
  long long int i, j;
  printf("-\t-\t");
  for (j = 0; j < m - 1; j++) {
    printf("%c\t", a[j]);
  }
  printf("\n-\t");
  for (i = 0; i < n; i++) { // Lines
    for (j = 0; j < m; j++) {
      if (j == 0 && i > 0)
        printf("%c\t", b[i - 1]);
      printf("%d\t", matrix[m * i + j]);
    }
    printf("\n");
  }
} /* End of printMatrix */

/*--------------------------------------------------------------------
 * Function:    printPredecessorMatrix
 * Purpose:     Print predecessor matrix
 */
void printPredecessorMatrix(int* matrix)
{
  long long int i, j, index;
  printf("    ");
  for (j = 0; j < m - 1; j++) {
    printf("%c ", a[j]);
  }
  printf("\n  ");
  for (i = 0; i < n; i++) { // Lines
    for (j = 0; j < m; j++) {
      if (j == 0 && i > 0)
        printf("%c ", b[i - 1]);
      index = m * i + j;
      if (matrix[index] < 0) { // negative values mark the backtracked PATH
        printf(BOLDRED);
        if (matrix[index] == -UP)
          printf("↑ ");
        else if (matrix[index] == -LEFT)
          printf("← ");
        else if (matrix[index] == -DIAGONAL)
          printf("↖ ");
        else
          printf("- ");
        printf(RESET);
      } else {
        if (matrix[index] == UP)
          printf("↑ ");
        else if (matrix[index] == LEFT)
          printf("← ");
        else if (matrix[index] == DIAGONAL)
          printf("↖ ");
        else
          printf("- ");
      }
    }
    printf("\n");
  }
} /* End of printPredecessorMatrix */

/*--------------------------------------------------------------------
 * Function:    generate
 * Purpose:     Generate random sequences a and b over {A,C,G,T}
 *              (note: aux==1 falls through to 'T', as in the original)
 */
void generate()
{
  // Random seed
  srand(time(NULL));

  // Generates the values of a
  long long int i;
  for (i = 0; i < m; i++) {
    int aux = rand() % 4;
    if (aux == 0)
      a[i] = 'A';
    else if (aux == 2)
      a[i] = 'C';
    else if (aux == 3)
      a[i] = 'G';
    else
      a[i] = 'T';
  }

  // Generates the values of b
  for (i = 0; i < n; i++) {
    int aux = rand() % 4;
    if (aux == 0)
      b[i] = 'A';
    else if (aux == 2)
      b[i] = 'C';
    else if (aux == 3)
      b[i] = 'G';
    else
      b[i] = 'T';
  }
} /* End of generate */

/*--------------------------------------------------------------------
 * External References:
 * http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1
 * http://pt.slideshare.net/avrilcoghlan/the-smith-waterman-algorithm
 * http://baba.sourceforge.net/
 */
11_master_thread.c
#include <stdio.h>
#include <omp.h>

/*
 * Demonstrates the OpenMP `master` construct: every thread in the team
 * executes the first printf, while the master block runs exactly once,
 * on thread 0.
 *
 * FIX: in the original, `#pragma omp master` appeared AFTER the parallel
 * region, so it executed in the sequential part of the program and
 * demonstrated nothing.  A master construct is only meaningful when it
 * binds to an enclosing parallel region, so both statements now share one
 * region.  Output is unchanged: each thread prints the first line, and the
 * second line is still printed once by thread 0 (no implied barrier).
 */
int main(void)
{
#pragma omp parallel
  {
    printf("Parallel from thread %i\n", omp_get_thread_num());

#pragma omp master
    {
      printf("Single from thread %i\n", omp_get_thread_num());
    }
  }
  return 0;
}
paraGraph.h
#ifndef __PARAGRAPH_H__
#define __PARAGRAPH_H__

#include <iostream>
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#include "vertex_set.h"
#include "graph.h"

#include <omp.h>

#include "mic.h"

#define THRESHOLD 10000
// #define DEBUG

/*
 * edgeMap --
 *
 * Students will implement this function.
 *
 * The input argument f is a class with the following methods defined:
 *   bool update(Vertex src, Vertex dst)
 *   bool cond(Vertex v)
 *
 * See apps/bfs.cpp for an example of such a class definition.
 *
 * When the argument removeDuplicates is false, the implementation of
 * edgeMap need not remove duplicate vertices from the VertexSet it
 * creates when iterating over edges.  This is a performance
 * optimization when the application knows (and can tell ParaGraph)
 * that f.update() guarantees that duplicate vertices cannot appear in
 * the output vertex set.
 *
 * Further notes: the implementation of edgeMap is templated on the
 * type of this object, which allows for higher performance code
 * generation as these methods will be inlined.
 *
 * NOTE(review): this implementation calls f.cond with both one and two
 * arguments (see below), which differs from the single-argument cond()
 * documented above — presumably the F classes used here provide both
 * overloads; verify against the apps.
 */
template <class F>
VertexSet *edgeMap(Graph g, VertexSet *u, F &f,
    bool removeDuplicates=true)
{
  // Pick the traversal strategy (dense bottom-up, pagerank-style, or
  // sparse top-down) based on the graph and the input frontier u.
  VertexSetType type = setType(g, u);
  //frontier
  VertexSet* set = newVertexSet(type, g->num_nodes, g->num_nodes);

  if(type==DENSE){
    #pragma omp parallel for schedule(guided)
    //loop through all vertexes in Graph, if vertexes' incoming edge is in u, add itself into frontier...
    for (int vertex=0; vertex<g->num_nodes; vertex++){
      //if v has been visited, continue
      if(!f.cond(vertex))
        continue;
      if(set->curSetFlags[vertex]) //||u->curSetFlags[vertex] TODO why this is not working?
        continue;
      // Scan this vertex's incoming edges; if any source is in u and the
      // update succeeds, add this vertex to the new frontier.
      const Vertex* start = incoming_begin(g, vertex);
      const Vertex* end = incoming_end(g, vertex);
      for (const Vertex* v=start; v!=end; v++) {
        if(!f.cond(*v, vertex))
          continue;
        if (u->curSetFlags[*v] && f.update(*v, vertex))
          set->curSetFlags[vertex] = true;
      }
    }
  }
  else if(type == PAGERANK) {
    //bottom up
    // Like DENSE but without cond() filtering: skip vertices already in u,
    // then pull updates from all in-neighbours that are in u.
    #pragma omp parallel for schedule(guided)
    for (int vertex=0; vertex<g->num_nodes; vertex++){
      if(u->curSetFlags[vertex]==true)
        continue;
      const Vertex* start = incoming_begin(g, vertex);
      const Vertex* end = incoming_end(g, vertex);
      for (const Vertex* v=start; v!=end; v++) {
        if (u->curSetFlags[*v] && f.update(*v, vertex))
          set->curSetFlags[vertex] = true;
      }
    }
  }
  else if (type==SPARSE) {
    //top down
    // Iterate over frontier vertices and push along their outgoing edges.
    // NOTE(review): several threads may set curSetFlags[*v] = true for the
    // same destination concurrently — all writers store the same value, but
    // confirm this is the intended (benign) race for this assignment.
    #pragma omp parallel for schedule(guided)
    for(int i=0; i<g->num_nodes; i++){
      if(!u->curSetFlags[i])
        continue;
      const Vertex *start = outgoing_begin(g, i);
      const Vertex *end = outgoing_end(g, i);
      for (const Vertex *v = start; v != end; v++) {
        if (f.cond(i,*v) && f.update(i, *v))
          set->curSetFlags[*v] = true;
      }
    }
  }

  // Count the new frontier's size with a parallel reduction over the flags.
  int sum = 0;
  #pragma omp parallel for schedule(static) reduction(+:sum)
  for (int i=0; i<g->num_nodes; i++) {
    sum += set->curSetFlags[i];
  }
  set->size = sum;

  return set;
}

/*
 * vertexMap --
 *
 * Students will implement this function.
 *
 * The input argument f is a class with the following methods defined:
 *   bool operator()(Vertex v)
 *
 * See apps/kBFS.cpp for an example implementation.
 *
 * Note that you'll call the function on a vertex as follows:
 *    Vertex v;
 *    bool result = f(v)
 *
 * If returnSet is false, then the implementation of vertexMap should
 * return NULL (it need not build and create a vertex set)
 */
template <class F>
VertexSet *vertexMap(VertexSet *u, F &f, bool returnSet=true)
{
  // TODO: Implement
  VertexSet* set = NULL;

  if (returnSet) {
    // Build a fresh set containing the members of u for which f(v) is true.
    set = newVertexSet(u->type, u->numNodes, u->numNodes);
    #pragma omp parallel for schedule(guided)
    for (int i=0; i<u->numNodes; i++) {
      if (u->curSetFlags[i]==1 && f(i))
        set->curSetFlags[i] = true;
    }
    int sum = 0;
    #pragma omp parallel for schedule(static) reduction(+:sum)
    for (int i=0; i<u->numNodes; i++)
      sum += set->curSetFlags[i];
    set->size = sum;
  }
  else {
    // No result set requested: apply f in place, removing from u any
    // vertex for which f(v) returns false, then recount u's size.
    #pragma omp parallel for schedule(guided)
    for (int i=0; i<u->numNodes; i++) {
      if (u->curSetFlags[i] && !f(i))
        u->curSetFlags[i] = false;
    }
    int sum = 0;
    #pragma omp parallel for schedule(static) reduction(+:sum)
    for (int i=0; i<u->numNodes; i++)
      sum += u->curSetFlags[i];
    u->size = sum;
  }

  return set;
}

#endif /* __PARAGRAPH_H__ */
t.c
# 1 "qla-1.7.1/QLA_D3_V_vpeq_M_times_pV.c" # 1 "<built-in>" 1 # 1 "<built-in>" 3 # 341 "<built-in>" 3 # 1 "<command line>" 1 # 1 "<built-in>" 2 # 1 "qla-1.7.1/QLA_D3_V_vpeq_M_times_pV.c" 2 # 1 "/usr/include/stdio.h" 1 3 4 # 27 "/usr/include/stdio.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/libc-header-start.h" 1 3 4 # 33 "/usr/include/x86_64-linux-gnu/bits/libc-header-start.h" 3 4 # 1 "/usr/include/features.h" 1 3 4 # 402 "/usr/include/features.h" 3 4 # 1 "/usr/include/stdc-predef.h" 1 3 4 # 403 "/usr/include/features.h" 2 3 4 # 424 "/usr/include/features.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" 1 3 4 # 427 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" 1 3 4 # 428 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" 2 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/long-double.h" 1 3 4 # 429 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" 2 3 4 # 425 "/usr/include/features.h" 2 3 4 # 448 "/usr/include/features.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/gnu/stubs.h" 1 3 4 # 10 "/usr/include/x86_64-linux-gnu/gnu/stubs.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/gnu/stubs-64.h" 1 3 4 # 11 "/usr/include/x86_64-linux-gnu/gnu/stubs.h" 2 3 4 # 449 "/usr/include/features.h" 2 3 4 # 34 "/usr/include/x86_64-linux-gnu/bits/libc-header-start.h" 2 3 4 # 28 "/usr/include/stdio.h" 2 3 4 # 1 "/common/home/sc1696/FPSanX/build/lib/clang/10.0.0/include/stddef.h" 1 3 4 # 46 "/common/home/sc1696/FPSanX/build/lib/clang/10.0.0/include/stddef.h" 3 4 typedef long unsigned int size_t; # 34 "/usr/include/stdio.h" 2 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/types.h" 1 3 4 # 27 "/usr/include/x86_64-linux-gnu/bits/types.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" 1 3 4 # 28 "/usr/include/x86_64-linux-gnu/bits/types.h" 2 3 4 typedef unsigned char __u_char; typedef unsigned short int __u_short; typedef unsigned int __u_int; typedef unsigned long int __u_long; typedef signed char __int8_t; typedef unsigned char __uint8_t; typedef 
signed short int __int16_t; typedef unsigned short int __uint16_t; typedef signed int __int32_t; typedef unsigned int __uint32_t; typedef signed long int __int64_t; typedef unsigned long int __uint64_t; typedef long int __quad_t; typedef unsigned long int __u_quad_t; typedef long int __intmax_t; typedef unsigned long int __uintmax_t; # 130 "/usr/include/x86_64-linux-gnu/bits/types.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" 1 3 4 # 131 "/usr/include/x86_64-linux-gnu/bits/types.h" 2 3 4 typedef unsigned long int __dev_t; typedef unsigned int __uid_t; typedef unsigned int __gid_t; typedef unsigned long int __ino_t; typedef unsigned long int __ino64_t; typedef unsigned int __mode_t; typedef unsigned long int __nlink_t; typedef long int __off_t; typedef long int __off64_t; typedef int __pid_t; typedef struct { int __val[2]; } __fsid_t; typedef long int __clock_t; typedef unsigned long int __rlim_t; typedef unsigned long int __rlim64_t; typedef unsigned int __id_t; typedef long int __time_t; typedef unsigned int __useconds_t; typedef long int __suseconds_t; typedef int __daddr_t; typedef int __key_t; typedef int __clockid_t; typedef void * __timer_t; typedef long int __blksize_t; typedef long int __blkcnt_t; typedef long int __blkcnt64_t; typedef unsigned long int __fsblkcnt_t; typedef unsigned long int __fsblkcnt64_t; typedef unsigned long int __fsfilcnt_t; typedef unsigned long int __fsfilcnt64_t; typedef long int __fsword_t; typedef long int __ssize_t; typedef long int __syscall_slong_t; typedef unsigned long int __syscall_ulong_t; typedef __off64_t __loff_t; typedef char *__caddr_t; typedef long int __intptr_t; typedef unsigned int __socklen_t; typedef int __sig_atomic_t; # 36 "/usr/include/stdio.h" 2 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/types/__FILE.h" 1 3 4 struct _IO_FILE; typedef struct _IO_FILE __FILE; # 37 "/usr/include/stdio.h" 2 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/types/FILE.h" 1 3 4 struct _IO_FILE; typedef struct _IO_FILE 
FILE; # 38 "/usr/include/stdio.h" 2 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/libio.h" 1 3 4 # 35 "/usr/include/x86_64-linux-gnu/bits/libio.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/_G_config.h" 1 3 4 # 19 "/usr/include/x86_64-linux-gnu/bits/_G_config.h" 3 4 # 1 "/common/home/sc1696/FPSanX/build/lib/clang/10.0.0/include/stddef.h" 1 3 4 # 20 "/usr/include/x86_64-linux-gnu/bits/_G_config.h" 2 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h" 1 3 4 # 13 "/usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h" 3 4 typedef struct { int __count; union { unsigned int __wch; char __wchb[4]; } __value; } __mbstate_t; # 22 "/usr/include/x86_64-linux-gnu/bits/_G_config.h" 2 3 4 typedef struct { __off_t __pos; __mbstate_t __state; } _G_fpos_t; typedef struct { __off64_t __pos; __mbstate_t __state; } _G_fpos64_t; # 36 "/usr/include/x86_64-linux-gnu/bits/libio.h" 2 3 4 # 53 "/usr/include/x86_64-linux-gnu/bits/libio.h" 3 4 # 1 "/common/home/sc1696/FPSanX/build/lib/clang/10.0.0/include/stdarg.h" 1 3 4 # 14 "/common/home/sc1696/FPSanX/build/lib/clang/10.0.0/include/stdarg.h" 3 4 typedef __builtin_va_list va_list; # 32 "/common/home/sc1696/FPSanX/build/lib/clang/10.0.0/include/stdarg.h" 3 4 typedef __builtin_va_list __gnuc_va_list; # 54 "/usr/include/x86_64-linux-gnu/bits/libio.h" 2 3 4 # 149 "/usr/include/x86_64-linux-gnu/bits/libio.h" 3 4 struct _IO_jump_t; struct _IO_FILE; typedef void _IO_lock_t; struct _IO_marker { struct _IO_marker *_next; struct _IO_FILE *_sbuf; int _pos; # 177 "/usr/include/x86_64-linux-gnu/bits/libio.h" 3 4 }; enum __codecvt_result { __codecvt_ok, __codecvt_partial, __codecvt_error, __codecvt_noconv }; # 245 "/usr/include/x86_64-linux-gnu/bits/libio.h" 3 4 struct _IO_FILE { int _flags; char* _IO_read_ptr; char* _IO_read_end; char* _IO_read_base; char* _IO_write_base; char* _IO_write_ptr; char* _IO_write_end; char* _IO_buf_base; char* _IO_buf_end; char *_IO_save_base; char *_IO_backup_base; char *_IO_save_end; struct _IO_marker 
*_markers; struct _IO_FILE *_chain; int _fileno; int _flags2; __off_t _old_offset; unsigned short _cur_column; signed char _vtable_offset; char _shortbuf[1]; _IO_lock_t *_lock; # 293 "/usr/include/x86_64-linux-gnu/bits/libio.h" 3 4 __off64_t _offset; void *__pad1; void *__pad2; void *__pad3; void *__pad4; size_t __pad5; int _mode; char _unused2[15 * sizeof (int) - 4 * sizeof (void *) - sizeof (size_t)]; }; typedef struct _IO_FILE _IO_FILE; struct _IO_FILE_plus; extern struct _IO_FILE_plus _IO_2_1_stdin_; extern struct _IO_FILE_plus _IO_2_1_stdout_; extern struct _IO_FILE_plus _IO_2_1_stderr_; # 337 "/usr/include/x86_64-linux-gnu/bits/libio.h" 3 4 typedef __ssize_t __io_read_fn (void *__cookie, char *__buf, size_t __nbytes); typedef __ssize_t __io_write_fn (void *__cookie, const char *__buf, size_t __n); typedef int __io_seek_fn (void *__cookie, __off64_t *__pos, int __w); typedef int __io_close_fn (void *__cookie); # 389 "/usr/include/x86_64-linux-gnu/bits/libio.h" 3 4 extern int __underflow (_IO_FILE *); extern int __uflow (_IO_FILE *); extern int __overflow (_IO_FILE *, int); # 433 "/usr/include/x86_64-linux-gnu/bits/libio.h" 3 4 extern int _IO_getc (_IO_FILE *__fp); extern int _IO_putc (int __c, _IO_FILE *__fp); extern int _IO_feof (_IO_FILE *__fp) __attribute__ ((__nothrow__ )); extern int _IO_ferror (_IO_FILE *__fp) __attribute__ ((__nothrow__ )); extern int _IO_peekc_locked (_IO_FILE *__fp); extern void _IO_flockfile (_IO_FILE *) __attribute__ ((__nothrow__ )); extern void _IO_funlockfile (_IO_FILE *) __attribute__ ((__nothrow__ )); extern int _IO_ftrylockfile (_IO_FILE *) __attribute__ ((__nothrow__ )); # 462 "/usr/include/x86_64-linux-gnu/bits/libio.h" 3 4 extern int _IO_vfscanf (_IO_FILE * __restrict, const char * __restrict, __gnuc_va_list, int *__restrict); extern int _IO_vfprintf (_IO_FILE *__restrict, const char *__restrict, __gnuc_va_list); extern __ssize_t _IO_padn (_IO_FILE *, int, __ssize_t); extern size_t _IO_sgetn (_IO_FILE *, void *, size_t); 
extern __off64_t _IO_seekoff (_IO_FILE *, __off64_t, int, int); extern __off64_t _IO_seekpos (_IO_FILE *, __off64_t, int); extern void _IO_free_backup_area (_IO_FILE *) __attribute__ ((__nothrow__ )); # 42 "/usr/include/stdio.h" 2 3 4 typedef __gnuc_va_list va_list; # 57 "/usr/include/stdio.h" 3 4 typedef __off_t off_t; # 71 "/usr/include/stdio.h" 3 4 typedef __ssize_t ssize_t; typedef _G_fpos_t fpos_t; # 131 "/usr/include/stdio.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/stdio_lim.h" 1 3 4 # 132 "/usr/include/stdio.h" 2 3 4 extern struct _IO_FILE *stdin; extern struct _IO_FILE *stdout; extern struct _IO_FILE *stderr; extern int remove (const char *__filename) __attribute__ ((__nothrow__ )); extern int rename (const char *__old, const char *__new) __attribute__ ((__nothrow__ )); extern int renameat (int __oldfd, const char *__old, int __newfd, const char *__new) __attribute__ ((__nothrow__ )); extern FILE *tmpfile (void) ; # 173 "/usr/include/stdio.h" 3 4 extern char *tmpnam (char *__s) __attribute__ ((__nothrow__ )) ; extern char *tmpnam_r (char *__s) __attribute__ ((__nothrow__ )) ; # 190 "/usr/include/stdio.h" 3 4 extern char *tempnam (const char *__dir, const char *__pfx) __attribute__ ((__nothrow__ )) __attribute__ ((__malloc__)) ; extern int fclose (FILE *__stream); extern int fflush (FILE *__stream); # 213 "/usr/include/stdio.h" 3 4 extern int fflush_unlocked (FILE *__stream); # 232 "/usr/include/stdio.h" 3 4 extern FILE *fopen (const char *__restrict __filename, const char *__restrict __modes) ; extern FILE *freopen (const char *__restrict __filename, const char *__restrict __modes, FILE *__restrict __stream) ; # 265 "/usr/include/stdio.h" 3 4 extern FILE *fdopen (int __fd, const char *__modes) __attribute__ ((__nothrow__ )) ; # 278 "/usr/include/stdio.h" 3 4 extern FILE *fmemopen (void *__s, size_t __len, const char *__modes) __attribute__ ((__nothrow__ )) ; extern FILE *open_memstream (char **__bufloc, size_t *__sizeloc) __attribute__ ((__nothrow__ )) 
; extern void setbuf (FILE *__restrict __stream, char *__restrict __buf) __attribute__ ((__nothrow__ )); extern int setvbuf (FILE *__restrict __stream, char *__restrict __buf, int __modes, size_t __n) __attribute__ ((__nothrow__ )); extern void setbuffer (FILE *__restrict __stream, char *__restrict __buf, size_t __size) __attribute__ ((__nothrow__ )); extern void setlinebuf (FILE *__stream) __attribute__ ((__nothrow__ )); extern int fprintf (FILE *__restrict __stream, const char *__restrict __format, ...); extern int printf (const char *__restrict __format, ...); extern int sprintf (char *__restrict __s, const char *__restrict __format, ...) __attribute__ ((__nothrow__)); extern int vfprintf (FILE *__restrict __s, const char *__restrict __format, __gnuc_va_list __arg); extern int vprintf (const char *__restrict __format, __gnuc_va_list __arg); extern int vsprintf (char *__restrict __s, const char *__restrict __format, __gnuc_va_list __arg) __attribute__ ((__nothrow__)); extern int snprintf (char *__restrict __s, size_t __maxlen, const char *__restrict __format, ...) __attribute__ ((__nothrow__)) __attribute__ ((__format__ (__printf__, 3, 4))); extern int vsnprintf (char *__restrict __s, size_t __maxlen, const char *__restrict __format, __gnuc_va_list __arg) __attribute__ ((__nothrow__)) __attribute__ ((__format__ (__printf__, 3, 0))); # 365 "/usr/include/stdio.h" 3 4 extern int vdprintf (int __fd, const char *__restrict __fmt, __gnuc_va_list __arg) __attribute__ ((__format__ (__printf__, 2, 0))); extern int dprintf (int __fd, const char *__restrict __fmt, ...) __attribute__ ((__format__ (__printf__, 2, 3))); extern int fscanf (FILE *__restrict __stream, const char *__restrict __format, ...) ; extern int scanf (const char *__restrict __format, ...) ; extern int sscanf (const char *__restrict __s, const char *__restrict __format, ...) 
__attribute__ ((__nothrow__ )); # 395 "/usr/include/stdio.h" 3 4 extern int fscanf (FILE *__restrict __stream, const char *__restrict __format, ...) __asm__ ("" "__isoc99_fscanf") ; extern int scanf (const char *__restrict __format, ...) __asm__ ("" "__isoc99_scanf") ; extern int sscanf (const char *__restrict __s, const char *__restrict __format, ...) __asm__ ("" "__isoc99_sscanf") __attribute__ ((__nothrow__ )); # 420 "/usr/include/stdio.h" 3 4 extern int vfscanf (FILE *__restrict __s, const char *__restrict __format, __gnuc_va_list __arg) __attribute__ ((__format__ (__scanf__, 2, 0))) ; extern int vscanf (const char *__restrict __format, __gnuc_va_list __arg) __attribute__ ((__format__ (__scanf__, 1, 0))) ; extern int vsscanf (const char *__restrict __s, const char *__restrict __format, __gnuc_va_list __arg) __attribute__ ((__nothrow__ )) __attribute__ ((__format__ (__scanf__, 2, 0))); # 443 "/usr/include/stdio.h" 3 4 extern int vfscanf (FILE *__restrict __s, const char *__restrict __format, __gnuc_va_list __arg) __asm__ ("" "__isoc99_vfscanf") __attribute__ ((__format__ (__scanf__, 2, 0))) ; extern int vscanf (const char *__restrict __format, __gnuc_va_list __arg) __asm__ ("" "__isoc99_vscanf") __attribute__ ((__format__ (__scanf__, 1, 0))) ; extern int vsscanf (const char *__restrict __s, const char *__restrict __format, __gnuc_va_list __arg) __asm__ ("" "__isoc99_vsscanf") __attribute__ ((__nothrow__ )) __attribute__ ((__format__ (__scanf__, 2, 0))); # 477 "/usr/include/stdio.h" 3 4 extern int fgetc (FILE *__stream); extern int getc (FILE *__stream); extern int getchar (void); # 495 "/usr/include/stdio.h" 3 4 extern int getc_unlocked (FILE *__stream); extern int getchar_unlocked (void); # 506 "/usr/include/stdio.h" 3 4 extern int fgetc_unlocked (FILE *__stream); # 517 "/usr/include/stdio.h" 3 4 extern int fputc (int __c, FILE *__stream); extern int putc (int __c, FILE *__stream); extern int putchar (int __c); # 537 "/usr/include/stdio.h" 3 4 extern int 
fputc_unlocked (int __c, FILE *__stream); extern int putc_unlocked (int __c, FILE *__stream); extern int putchar_unlocked (int __c); extern int getw (FILE *__stream); extern int putw (int __w, FILE *__stream); extern char *fgets (char *__restrict __s, int __n, FILE *__restrict __stream) ; # 603 "/usr/include/stdio.h" 3 4 extern __ssize_t __getdelim (char **__restrict __lineptr, size_t *__restrict __n, int __delimiter, FILE *__restrict __stream) ; extern __ssize_t getdelim (char **__restrict __lineptr, size_t *__restrict __n, int __delimiter, FILE *__restrict __stream) ; extern __ssize_t getline (char **__restrict __lineptr, size_t *__restrict __n, FILE *__restrict __stream) ; extern int fputs (const char *__restrict __s, FILE *__restrict __stream); extern int puts (const char *__s); extern int ungetc (int __c, FILE *__stream); extern size_t fread (void *__restrict __ptr, size_t __size, size_t __n, FILE *__restrict __stream) ; extern size_t fwrite (const void *__restrict __ptr, size_t __size, size_t __n, FILE *__restrict __s); # 673 "/usr/include/stdio.h" 3 4 extern size_t fread_unlocked (void *__restrict __ptr, size_t __size, size_t __n, FILE *__restrict __stream) ; extern size_t fwrite_unlocked (const void *__restrict __ptr, size_t __size, size_t __n, FILE *__restrict __stream); extern int fseek (FILE *__stream, long int __off, int __whence); extern long int ftell (FILE *__stream) ; extern void rewind (FILE *__stream); # 707 "/usr/include/stdio.h" 3 4 extern int fseeko (FILE *__stream, __off_t __off, int __whence); extern __off_t ftello (FILE *__stream) ; # 731 "/usr/include/stdio.h" 3 4 extern int fgetpos (FILE *__restrict __stream, fpos_t *__restrict __pos); extern int fsetpos (FILE *__stream, const fpos_t *__pos); # 757 "/usr/include/stdio.h" 3 4 extern void clearerr (FILE *__stream) __attribute__ ((__nothrow__ )); extern int feof (FILE *__stream) __attribute__ ((__nothrow__ )) ; extern int ferror (FILE *__stream) __attribute__ ((__nothrow__ )) ; extern void 
clearerr_unlocked (FILE *__stream) __attribute__ ((__nothrow__ )); extern int feof_unlocked (FILE *__stream) __attribute__ ((__nothrow__ )) ; extern int ferror_unlocked (FILE *__stream) __attribute__ ((__nothrow__ )) ; extern void perror (const char *__s); # 1 "/usr/include/x86_64-linux-gnu/bits/sys_errlist.h" 1 3 4 # 26 "/usr/include/x86_64-linux-gnu/bits/sys_errlist.h" 3 4 extern int sys_nerr; extern const char *const sys_errlist[]; # 782 "/usr/include/stdio.h" 2 3 4 extern int fileno (FILE *__stream) __attribute__ ((__nothrow__ )) ; extern int fileno_unlocked (FILE *__stream) __attribute__ ((__nothrow__ )) ; # 800 "/usr/include/stdio.h" 3 4 extern FILE *popen (const char *__command, const char *__modes) ; extern int pclose (FILE *__stream); extern char *ctermid (char *__s) __attribute__ ((__nothrow__ )); # 840 "/usr/include/stdio.h" 3 4 extern void flockfile (FILE *__stream) __attribute__ ((__nothrow__ )); extern int ftrylockfile (FILE *__stream) __attribute__ ((__nothrow__ )) ; extern void funlockfile (FILE *__stream) __attribute__ ((__nothrow__ )); # 4 "qla-1.7.1/QLA_D3_V_vpeq_M_times_pV.c" 2 # 1 "./qla-1.7.1/qla_config.h" 1 # 5 "qla-1.7.1/QLA_D3_V_vpeq_M_times_pV.c" 2 # 1 "./qla-1.7.1/qla_types.h" 1 # 13 "./qla-1.7.1/qla_types.h" # 1 "./qla-1.7.1/qla_complex.h" 1 # 1 "/common/home/sc1696/FPSanX/build/lib/clang/10.0.0/include/float.h" 1 3 # 5 "./qla-1.7.1/qla_complex.h" 2 typedef int QLA_Int; typedef float QLA_F_Real; typedef double QLA_D_Real; typedef long double QLA_Q_Real; # 1 "./qla-1.7.1/qla_complex_c99.h" 1 typedef float _Complex QLA_F_Complex __attribute__ ((aligned (8))); typedef double _Complex QLA_D_Complex __attribute__ ((aligned (16))); typedef long double _Complex QLA_Q_Complex __attribute__ ((aligned )); # 25 "./qla-1.7.1/qla_complex.h" 2 # 14 "./qla-1.7.1/qla_types.h" 2 # 39 "./qla-1.7.1/qla_types.h" typedef QLA_F_Complex QLA_F3_ColorVector[3]; typedef QLA_F_Complex QLA_F3_HalfFermion[3][(4)/2]; typedef QLA_F_Complex QLA_F3_DiracFermion[3][4]; 
typedef QLA_F_Complex QLA_F3_ColorMatrix[3][3]; typedef QLA_F_Complex QLA_F3_DiracPropagator[3][4][3][4]; # 55 "./qla-1.7.1/qla_types.h" typedef QLA_D_Complex QLA_D3_ColorVector[3]; typedef QLA_D_Complex QLA_D3_HalfFermion[3][(4)/2]; typedef QLA_D_Complex QLA_D3_DiracFermion[3][4]; typedef QLA_D_Complex QLA_D3_ColorMatrix[3][3]; typedef QLA_D_Complex QLA_D3_DiracPropagator[3][4][3][4]; # 71 "./qla-1.7.1/qla_types.h" typedef QLA_Q_Complex QLA_Q3_ColorVector[3]; typedef QLA_Q_Complex QLA_Q3_HalfFermion[3][(4)/2]; typedef QLA_Q_Complex QLA_Q3_DiracFermion[3][4]; typedef QLA_Q_Complex QLA_Q3_ColorMatrix[3][3]; typedef QLA_Q_Complex QLA_Q3_DiracPropagator[3][4][3][4]; # 88 "./qla-1.7.1/qla_types.h" typedef QLA_F_Complex QLA_F2_ColorVector[2]; typedef QLA_F_Complex QLA_F2_HalfFermion[2][(4)/2]; typedef QLA_F_Complex QLA_F2_DiracFermion[2][4]; typedef QLA_F_Complex QLA_F2_ColorMatrix[2][2]; typedef QLA_F_Complex QLA_F2_DiracPropagator[2][4][2][4]; # 104 "./qla-1.7.1/qla_types.h" typedef QLA_D_Complex QLA_D2_ColorVector[2]; typedef QLA_D_Complex QLA_D2_HalfFermion[2][(4)/2]; typedef QLA_D_Complex QLA_D2_DiracFermion[2][4]; typedef QLA_D_Complex QLA_D2_ColorMatrix[2][2]; typedef QLA_D_Complex QLA_D2_DiracPropagator[2][4][2][4]; # 120 "./qla-1.7.1/qla_types.h" typedef QLA_Q_Complex QLA_Q2_ColorVector[2]; typedef QLA_Q_Complex QLA_Q2_HalfFermion[2][(4)/2]; typedef QLA_Q_Complex QLA_Q2_DiracFermion[2][4]; typedef QLA_Q_Complex QLA_Q2_ColorMatrix[2][2]; typedef QLA_Q_Complex QLA_Q2_DiracPropagator[2][4][2][4]; # 6 "qla-1.7.1/QLA_D3_V_vpeq_M_times_pV.c" 2 # 1 "./qla-1.7.1/qla_random.h" 1 typedef struct { unsigned int r0,r1,r2,r3,r4,r5,r6; unsigned int multiplier,addend,ic_state; float scale; } QLA_RandomState; QLA_F_Real QLA_random(QLA_RandomState *prn_pt); QLA_F_Real QLA_gaussian(QLA_RandomState *prn_pt); void QLA_seed_random(QLA_RandomState *prn_pt, int seed, QLA_Int index); extern int QLA_use_milc_gaussian; extern const char * QLA_version_str(void); extern int 
QLA_version_int(void); # 7 "qla-1.7.1/QLA_D3_V_vpeq_M_times_pV.c" 2 # 1 "./qla-1.7.1/qla_cmath.h" 1 QLA_F_Complex QLA_F_cexp( QLA_F_Complex *a ); QLA_D_Complex QLA_D_cexp( QLA_D_Complex *a ); QLA_F_Complex QLA_F_cexpi( QLA_F_Real theta ); QLA_D_Complex QLA_D_cexpi( QLA_D_Real theta ); QLA_F_Complex QLA_F_clog( QLA_F_Complex *a ); QLA_D_Complex QLA_D_clog( QLA_D_Complex *a ); QLA_F_Complex QLA_F_csqrt( QLA_F_Complex *z ); QLA_D_Complex QLA_D_csqrt( QLA_D_Complex *z ); # 8 "qla-1.7.1/QLA_D3_V_vpeq_M_times_pV.c" 2 # 1 "./qla-1.7.1/qla_d3.h" 1 # 22 "./qla-1.7.1/qla_d3.h" void QLA_D3_V_eq_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a); void QLA_D3_V_veq_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int n); void QLA_D3_V_xeq_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int *index, int n); void QLA_D3_V_veq_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int n); void QLA_D3_V_xeq_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int *index, int n); void QLA_D3_H_eq_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a); void QLA_D3_H_veq_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int n); void QLA_D3_H_xeq_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int *index, int n); void QLA_D3_H_veq_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int n); void QLA_D3_H_xeq_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int *index, int n); void QLA_D3_D_eq_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a); void QLA_D3_D_veq_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int n); void QLA_D3_D_xeq_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int *index, int n); void QLA_D3_D_veq_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int n); void QLA_D3_D_xeq_pD ( QLA_D3_DiracFermion *restrict r, 
QLA_D3_DiracFermion *restrict *a, int *index, int n); void QLA_D3_M_eq_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_veq_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xeq_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_veq_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xeq_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_P_eq_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_P_veq_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_P_xeq_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_P_veq_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_P_xeq_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); void QLA_D3_V_peq_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a); void QLA_D3_V_vpeq_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int n); void QLA_D3_V_xpeq_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int *index, int n); void QLA_D3_V_vpeq_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int n); void QLA_D3_V_xpeq_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int *index, int n); void QLA_D3_H_peq_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a); void QLA_D3_H_vpeq_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int n); void QLA_D3_H_xpeq_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int *index, int n); void QLA_D3_H_vpeq_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int n); void QLA_D3_H_xpeq_pH ( QLA_D3_HalfFermion 
*restrict r, QLA_D3_HalfFermion *restrict *a, int *index, int n); void QLA_D3_D_peq_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a); void QLA_D3_D_vpeq_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int n); void QLA_D3_D_xpeq_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int *index, int n); void QLA_D3_D_vpeq_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int n); void QLA_D3_D_xpeq_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int *index, int n); void QLA_D3_M_peq_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_vpeq_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xpeq_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_vpeq_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xpeq_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_P_peq_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_P_vpeq_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_P_xpeq_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_P_vpeq_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_P_xpeq_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); void QLA_D3_V_eqm_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a); void QLA_D3_V_veqm_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int n); void QLA_D3_V_xeqm_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int *index, int n); void QLA_D3_V_veqm_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int n); void 
QLA_D3_V_xeqm_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int *index, int n); void QLA_D3_H_eqm_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a); void QLA_D3_H_veqm_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int n); void QLA_D3_H_xeqm_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int *index, int n); void QLA_D3_H_veqm_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int n); void QLA_D3_H_xeqm_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int *index, int n); void QLA_D3_D_eqm_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a); void QLA_D3_D_veqm_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int n); void QLA_D3_D_xeqm_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int *index, int n); void QLA_D3_D_veqm_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int n); void QLA_D3_D_xeqm_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int *index, int n); void QLA_D3_M_eqm_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_veqm_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xeqm_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_veqm_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xeqm_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_P_eqm_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_P_veqm_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_P_xeqm_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_P_veqm_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator 
*restrict *a, int n); void QLA_D3_P_xeqm_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); void QLA_D3_V_meq_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a); void QLA_D3_V_vmeq_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int n); void QLA_D3_V_xmeq_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int *index, int n); void QLA_D3_V_vmeq_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int n); void QLA_D3_V_xmeq_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int *index, int n); void QLA_D3_H_meq_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a); void QLA_D3_H_vmeq_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int n); void QLA_D3_H_xmeq_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int *index, int n); void QLA_D3_H_vmeq_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int n); void QLA_D3_H_xmeq_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int *index, int n); void QLA_D3_D_meq_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a); void QLA_D3_D_vmeq_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int n); void QLA_D3_D_xmeq_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int *index, int n); void QLA_D3_D_vmeq_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int n); void QLA_D3_D_xmeq_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int *index, int n); void QLA_D3_M_meq_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_vmeq_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xmeq_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_vmeq_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix 
*restrict *a, int n); void QLA_D3_M_xmeq_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_P_meq_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_P_vmeq_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_P_xmeq_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_P_vmeq_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_P_xmeq_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); # 207 "./qla-1.7.1/qla_d3.h" void QLA_D3_M_eq_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_veq_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xeq_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_veq_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xeq_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_P_eq_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_P_veq_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_P_xeq_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_P_veq_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_P_xeq_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); void QLA_D3_M_peq_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_vpeq_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xpeq_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, 
int *index, int n); void QLA_D3_M_vpeq_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xpeq_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_P_peq_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_P_vpeq_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_P_xpeq_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_P_vpeq_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_P_xpeq_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); void QLA_D3_M_eqm_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_veqm_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xeqm_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_veqm_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xeqm_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_P_eqm_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_P_veqm_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_P_xeqm_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_P_veqm_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_P_xeqm_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); void QLA_D3_M_meq_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_vmeq_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void 
QLA_D3_M_xmeq_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_vmeq_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xmeq_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_P_meq_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_P_vmeq_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_P_xmeq_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_P_vmeq_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_P_xmeq_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); # 284 "./qla-1.7.1/qla_d3.h" void QLA_D3_M_eq_transpose_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_veq_transpose_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xeq_transpose_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_veq_transpose_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xeq_transpose_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_P_eq_transpose_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_P_veq_transpose_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_P_xeq_transpose_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_P_veq_transpose_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_P_xeq_transpose_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); 
void QLA_D3_M_peq_transpose_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_vpeq_transpose_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xpeq_transpose_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_vpeq_transpose_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xpeq_transpose_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_P_peq_transpose_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_P_vpeq_transpose_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_P_xpeq_transpose_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_P_vpeq_transpose_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_P_xpeq_transpose_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); void QLA_D3_M_eqm_transpose_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_veqm_transpose_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xeqm_transpose_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_veqm_transpose_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xeqm_transpose_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_P_eqm_transpose_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_P_veqm_transpose_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_P_xeqm_transpose_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, 
int *index, int n); void QLA_D3_P_veqm_transpose_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_P_xeqm_transpose_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); void QLA_D3_M_meq_transpose_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_vmeq_transpose_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xmeq_transpose_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_vmeq_transpose_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xmeq_transpose_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_P_meq_transpose_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_P_vmeq_transpose_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_P_xmeq_transpose_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_P_vmeq_transpose_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_P_xmeq_transpose_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); # 361 "./qla-1.7.1/qla_d3.h" void QLA_D3_V_eq_conj_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a); void QLA_D3_V_veq_conj_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int n); void QLA_D3_V_xeq_conj_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int *index, int n); void QLA_D3_V_veq_conj_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int n); void QLA_D3_V_xeq_conj_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int *index, int n); void QLA_D3_H_eq_conj_H ( QLA_D3_HalfFermion *restrict r, 
QLA_D3_HalfFermion *restrict a); void QLA_D3_H_veq_conj_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int n); void QLA_D3_H_xeq_conj_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int *index, int n); void QLA_D3_H_veq_conj_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int n); void QLA_D3_H_xeq_conj_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int *index, int n); void QLA_D3_D_eq_conj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a); void QLA_D3_D_veq_conj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int n); void QLA_D3_D_xeq_conj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int *index, int n); void QLA_D3_D_veq_conj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int n); void QLA_D3_D_xeq_conj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int *index, int n); void QLA_D3_M_eq_conj_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_veq_conj_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xeq_conj_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_veq_conj_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xeq_conj_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_P_eq_conj_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_P_veq_conj_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_P_xeq_conj_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_P_veq_conj_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_P_xeq_conj_pP ( QLA_D3_DiracPropagator *restrict r, 
QLA_D3_DiracPropagator *restrict *a, int *index, int n); void QLA_D3_V_peq_conj_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a); void QLA_D3_V_vpeq_conj_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int n); void QLA_D3_V_xpeq_conj_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int *index, int n); void QLA_D3_V_vpeq_conj_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int n); void QLA_D3_V_xpeq_conj_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int *index, int n); void QLA_D3_H_peq_conj_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a); void QLA_D3_H_vpeq_conj_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int n); void QLA_D3_H_xpeq_conj_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int *index, int n); void QLA_D3_H_vpeq_conj_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int n); void QLA_D3_H_xpeq_conj_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int *index, int n); void QLA_D3_D_peq_conj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a); void QLA_D3_D_vpeq_conj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int n); void QLA_D3_D_xpeq_conj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int *index, int n); void QLA_D3_D_vpeq_conj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int n); void QLA_D3_D_xpeq_conj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int *index, int n); void QLA_D3_M_peq_conj_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_vpeq_conj_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xpeq_conj_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_vpeq_conj_pM ( QLA_D3_ColorMatrix *restrict r, 
QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xpeq_conj_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_P_peq_conj_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_P_vpeq_conj_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_P_xpeq_conj_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_P_vpeq_conj_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_P_xpeq_conj_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); void QLA_D3_V_eqm_conj_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a); void QLA_D3_V_veqm_conj_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int n); void QLA_D3_V_xeqm_conj_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int *index, int n); void QLA_D3_V_veqm_conj_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int n); void QLA_D3_V_xeqm_conj_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int *index, int n); void QLA_D3_H_eqm_conj_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a); void QLA_D3_H_veqm_conj_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int n); void QLA_D3_H_xeqm_conj_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int *index, int n); void QLA_D3_H_veqm_conj_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int n); void QLA_D3_H_xeqm_conj_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int *index, int n); void QLA_D3_D_eqm_conj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a); void QLA_D3_D_veqm_conj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int n); void QLA_D3_D_xeqm_conj_D ( QLA_D3_DiracFermion 
*restrict r, QLA_D3_DiracFermion *restrict a, int *index, int n); void QLA_D3_D_veqm_conj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int n); void QLA_D3_D_xeqm_conj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int *index, int n); void QLA_D3_M_eqm_conj_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_veqm_conj_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xeqm_conj_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_veqm_conj_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xeqm_conj_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_P_eqm_conj_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_P_veqm_conj_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_P_xeqm_conj_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_P_veqm_conj_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_P_xeqm_conj_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); void QLA_D3_V_meq_conj_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a); void QLA_D3_V_vmeq_conj_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int n); void QLA_D3_V_xmeq_conj_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int *index, int n); void QLA_D3_V_vmeq_conj_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int n); void QLA_D3_V_xmeq_conj_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int *index, int n); void QLA_D3_H_meq_conj_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a); void 
QLA_D3_H_vmeq_conj_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int n); void QLA_D3_H_xmeq_conj_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int *index, int n); void QLA_D3_H_vmeq_conj_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int n); void QLA_D3_H_xmeq_conj_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int *index, int n); void QLA_D3_D_meq_conj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a); void QLA_D3_D_vmeq_conj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int n); void QLA_D3_D_xmeq_conj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int *index, int n); void QLA_D3_D_vmeq_conj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int n); void QLA_D3_D_xmeq_conj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int *index, int n); void QLA_D3_M_meq_conj_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_vmeq_conj_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xmeq_conj_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_vmeq_conj_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xmeq_conj_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_P_meq_conj_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_P_vmeq_conj_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_P_xmeq_conj_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_P_vmeq_conj_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_P_xmeq_conj_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator 
*restrict *a, int *index, int n); # 546 "./qla-1.7.1/qla_d3.h" void QLA_D3_R_eq_norm2_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a); void QLA_D3_R_veq_norm2_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, int n); void QLA_D3_R_xeq_norm2_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, int *index, int n); void QLA_D3_R_veq_norm2_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, int n); void QLA_D3_R_xeq_norm2_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, int *index, int n); void QLA_D3_R_eq_norm2_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a); void QLA_D3_R_veq_norm2_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, int n); void QLA_D3_R_xeq_norm2_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, int *index, int n); void QLA_D3_R_veq_norm2_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, int n); void QLA_D3_R_xeq_norm2_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, int *index, int n); void QLA_D3_R_eq_norm2_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a); void QLA_D3_R_veq_norm2_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, int n); void QLA_D3_R_xeq_norm2_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, int *index, int n); void QLA_D3_R_veq_norm2_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, int n); void QLA_D3_R_xeq_norm2_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, int *index, int n); void QLA_D3_R_eq_norm2_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_R_veq_norm2_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_R_xeq_norm2_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_R_veq_norm2_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_R_xeq_norm2_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void 
QLA_D3_R_eq_norm2_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_R_veq_norm2_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_R_xeq_norm2_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_R_veq_norm2_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_R_xeq_norm2_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); void QLA_D3_R_peq_norm2_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a); void QLA_D3_R_vpeq_norm2_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, int n); void QLA_D3_R_xpeq_norm2_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, int *index, int n); void QLA_D3_R_vpeq_norm2_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, int n); void QLA_D3_R_xpeq_norm2_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, int *index, int n); void QLA_D3_R_peq_norm2_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a); void QLA_D3_R_vpeq_norm2_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, int n); void QLA_D3_R_xpeq_norm2_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, int *index, int n); void QLA_D3_R_vpeq_norm2_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, int n); void QLA_D3_R_xpeq_norm2_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, int *index, int n); void QLA_D3_R_peq_norm2_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a); void QLA_D3_R_vpeq_norm2_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, int n); void QLA_D3_R_xpeq_norm2_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, int *index, int n); void QLA_D3_R_vpeq_norm2_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, int n); void QLA_D3_R_xpeq_norm2_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, int *index, int n); void QLA_D3_R_peq_norm2_M ( QLA_D_Real 
*restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_R_vpeq_norm2_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_R_xpeq_norm2_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_R_vpeq_norm2_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_R_xpeq_norm2_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_R_peq_norm2_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_R_vpeq_norm2_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_R_xpeq_norm2_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_R_vpeq_norm2_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_R_xpeq_norm2_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); void QLA_D3_R_eqm_norm2_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a); void QLA_D3_R_veqm_norm2_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, int n); void QLA_D3_R_xeqm_norm2_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, int *index, int n); void QLA_D3_R_veqm_norm2_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, int n); void QLA_D3_R_xeqm_norm2_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, int *index, int n); void QLA_D3_R_eqm_norm2_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a); void QLA_D3_R_veqm_norm2_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, int n); void QLA_D3_R_xeqm_norm2_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, int *index, int n); void QLA_D3_R_veqm_norm2_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, int n); void QLA_D3_R_xeqm_norm2_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, int *index, int n); void QLA_D3_R_eqm_norm2_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion 
*restrict a); void QLA_D3_R_veqm_norm2_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, int n); void QLA_D3_R_xeqm_norm2_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, int *index, int n); void QLA_D3_R_veqm_norm2_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, int n); void QLA_D3_R_xeqm_norm2_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, int *index, int n); void QLA_D3_R_eqm_norm2_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_R_veqm_norm2_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_R_xeqm_norm2_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_R_veqm_norm2_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_R_xeqm_norm2_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_R_eqm_norm2_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_R_veqm_norm2_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_R_xeqm_norm2_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_R_veqm_norm2_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_R_xeqm_norm2_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); void QLA_D3_R_meq_norm2_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a); void QLA_D3_R_vmeq_norm2_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, int n); void QLA_D3_R_xmeq_norm2_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, int *index, int n); void QLA_D3_R_vmeq_norm2_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, int n); void QLA_D3_R_xmeq_norm2_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, int *index, int n); void QLA_D3_R_meq_norm2_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a); void 
QLA_D3_R_vmeq_norm2_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, int n); void QLA_D3_R_xmeq_norm2_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, int *index, int n); void QLA_D3_R_vmeq_norm2_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, int n); void QLA_D3_R_xmeq_norm2_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, int *index, int n); void QLA_D3_R_meq_norm2_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a); void QLA_D3_R_vmeq_norm2_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, int n); void QLA_D3_R_xmeq_norm2_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, int *index, int n); void QLA_D3_R_vmeq_norm2_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, int n); void QLA_D3_R_xmeq_norm2_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, int *index, int n); void QLA_D3_R_meq_norm2_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_R_vmeq_norm2_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_R_xmeq_norm2_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_R_vmeq_norm2_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_R_xmeq_norm2_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_R_meq_norm2_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_R_vmeq_norm2_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_R_xmeq_norm2_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_R_vmeq_norm2_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_R_xmeq_norm2_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); # 736 "./qla-1.7.1/qla_d3.h" void QLA_D3_C_eq_elem_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, int i_c, 
int j_c); void QLA_D3_C_veq_elem_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, int i_c, int j_c, int n); void QLA_D3_C_xeq_elem_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, int i_c, int j_c, int *index, int n); void QLA_D3_C_veq_elem_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, int i_c, int j_c, int n); void QLA_D3_C_xeq_elem_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, int i_c, int j_c, int *index, int n); void QLA_D3_M_eq_elem_C ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, int i_c, int j_c); void QLA_D3_M_veq_elem_C ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, int i_c, int j_c, int n); void QLA_D3_M_xeq_elem_C ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, int i_c, int j_c, int *index, int n); void QLA_D3_M_veq_elem_pC ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict *a, int i_c, int j_c, int n); void QLA_D3_M_xeq_elem_pC ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict *a, int i_c, int j_c, int *index, int n); void QLA_D3_C_eq_elem_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, int i_c, int i_s); void QLA_D3_C_veq_elem_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, int i_c, int i_s, int n); void QLA_D3_C_xeq_elem_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, int i_c, int i_s, int *index, int n); void QLA_D3_C_veq_elem_pH ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict *a, int i_c, int i_s, int n); void QLA_D3_C_xeq_elem_pH ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict *a, int i_c, int i_s, int *index, int n); void QLA_D3_H_eq_elem_C ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, int i_c, int i_s); void QLA_D3_H_veq_elem_C ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, int i_c, int i_s, int n); void QLA_D3_H_xeq_elem_C ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, int i_c, int i_s, int *index, int n); void 
QLA_D3_H_veq_elem_pC ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict *a, int i_c, int i_s, int n); void QLA_D3_H_xeq_elem_pC ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict *a, int i_c, int i_s, int *index, int n); void QLA_D3_C_eq_elem_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, int i_c, int i_s); void QLA_D3_C_veq_elem_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, int i_c, int i_s, int n); void QLA_D3_C_xeq_elem_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, int i_c, int i_s, int *index, int n); void QLA_D3_C_veq_elem_pD ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict *a, int i_c, int i_s, int n); void QLA_D3_C_xeq_elem_pD ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict *a, int i_c, int i_s, int *index, int n); void QLA_D3_D_eq_elem_C ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, int i_c, int i_s); void QLA_D3_D_veq_elem_C ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, int i_c, int i_s, int n); void QLA_D3_D_xeq_elem_C ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, int i_c, int i_s, int *index, int n); void QLA_D3_D_veq_elem_pC ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict *a, int i_c, int i_s, int n); void QLA_D3_D_xeq_elem_pC ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict *a, int i_c, int i_s, int *index, int n); void QLA_D3_C_eq_elem_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, int i_c); void QLA_D3_C_veq_elem_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, int i_c, int n); void QLA_D3_C_xeq_elem_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, int i_c, int *index, int n); void QLA_D3_C_veq_elem_pV ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict *a, int i_c, int n); void QLA_D3_C_xeq_elem_pV ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict *a, int i_c, int *index, int n); void QLA_D3_V_eq_elem_C ( QLA_D3_ColorVector *restrict r, 
QLA_D_Complex *restrict a, int i_c); void QLA_D3_V_veq_elem_C ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, int i_c, int n); void QLA_D3_V_xeq_elem_C ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, int i_c, int *index, int n); void QLA_D3_V_veq_elem_pC ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict *a, int i_c, int n); void QLA_D3_V_xeq_elem_pC ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict *a, int i_c, int *index, int n); void QLA_D3_C_eq_elem_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, int i_c, int i_s, int j_c, int j_s); void QLA_D3_C_veq_elem_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, int i_c, int i_s, int j_c, int j_s, int n); void QLA_D3_C_xeq_elem_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, int i_c, int i_s, int j_c, int j_s, int *index, int n); void QLA_D3_C_veq_elem_pP ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict *a, int i_c, int i_s, int j_c, int j_s, int n); void QLA_D3_C_xeq_elem_pP ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict *a, int i_c, int i_s, int j_c, int j_s, int *index, int n); void QLA_D3_P_eq_elem_C ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, int i_c, int i_s, int j_c, int j_s); void QLA_D3_P_veq_elem_C ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, int i_c, int i_s, int j_c, int j_s, int n); void QLA_D3_P_xeq_elem_C ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, int i_c, int i_s, int j_c, int j_s, int *index, int n); void QLA_D3_P_veq_elem_pC ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict *a, int i_c, int i_s, int j_c, int j_s, int n); void QLA_D3_P_xeq_elem_pC ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict *a, int i_c, int i_s, int j_c, int j_s, int *index, int n); # 831 "./qla-1.7.1/qla_d3.h" void QLA_D3_V_eq_colorvec_M ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, int j_c); void 
QLA_D3_V_veq_colorvec_M ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, int j_c, int n); void QLA_D3_V_xeq_colorvec_M ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, int j_c, int *index, int n); void QLA_D3_V_veq_colorvec_pM ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, int j_c, int n); void QLA_D3_V_xeq_colorvec_pM ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, int j_c, int *index, int n); void QLA_D3_M_eq_colorvec_V ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict a, int j_c); void QLA_D3_M_veq_colorvec_V ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict a, int j_c, int n); void QLA_D3_M_xeq_colorvec_V ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict a, int j_c, int *index, int n); void QLA_D3_M_veq_colorvec_pV ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict *a, int j_c, int n); void QLA_D3_M_xeq_colorvec_pV ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict *a, int j_c, int *index, int n); void QLA_D3_V_eq_colorvec_H ( QLA_D3_ColorVector *restrict r, QLA_D3_HalfFermion *restrict a, int i_s); void QLA_D3_V_veq_colorvec_H ( QLA_D3_ColorVector *restrict r, QLA_D3_HalfFermion *restrict a, int i_s, int n); void QLA_D3_V_xeq_colorvec_H ( QLA_D3_ColorVector *restrict r, QLA_D3_HalfFermion *restrict a, int i_s, int *index, int n); void QLA_D3_V_veq_colorvec_pH ( QLA_D3_ColorVector *restrict r, QLA_D3_HalfFermion *restrict *a, int i_s, int n); void QLA_D3_V_xeq_colorvec_pH ( QLA_D3_ColorVector *restrict r, QLA_D3_HalfFermion *restrict *a, int i_s, int *index, int n); void QLA_D3_H_eq_colorvec_V ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorVector *restrict a, int i_s); void QLA_D3_H_veq_colorvec_V ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorVector *restrict a, int i_s, int n); void QLA_D3_H_xeq_colorvec_V ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorVector *restrict a, int i_s, int *index, int n); void 
QLA_D3_H_veq_colorvec_pV ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorVector *restrict *a, int i_s, int n); void QLA_D3_H_xeq_colorvec_pV ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorVector *restrict *a, int i_s, int *index, int n); void QLA_D3_V_eq_colorvec_D ( QLA_D3_ColorVector *restrict r, QLA_D3_DiracFermion *restrict a, int i_s); void QLA_D3_V_veq_colorvec_D ( QLA_D3_ColorVector *restrict r, QLA_D3_DiracFermion *restrict a, int i_s, int n); void QLA_D3_V_xeq_colorvec_D ( QLA_D3_ColorVector *restrict r, QLA_D3_DiracFermion *restrict a, int i_s, int *index, int n); void QLA_D3_V_veq_colorvec_pD ( QLA_D3_ColorVector *restrict r, QLA_D3_DiracFermion *restrict *a, int i_s, int n); void QLA_D3_V_xeq_colorvec_pD ( QLA_D3_ColorVector *restrict r, QLA_D3_DiracFermion *restrict *a, int i_s, int *index, int n); void QLA_D3_D_eq_colorvec_V ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorVector *restrict a, int i_s); void QLA_D3_D_veq_colorvec_V ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorVector *restrict a, int i_s, int n); void QLA_D3_D_xeq_colorvec_V ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorVector *restrict a, int i_s, int *index, int n); void QLA_D3_D_veq_colorvec_pV ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorVector *restrict *a, int i_s, int n); void QLA_D3_D_xeq_colorvec_pV ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorVector *restrict *a, int i_s, int *index, int n); void QLA_D3_V_eq_colorvec_P ( QLA_D3_ColorVector *restrict r, QLA_D3_DiracPropagator *restrict a, int i_s, int j_c, int j_s); void QLA_D3_V_veq_colorvec_P ( QLA_D3_ColorVector *restrict r, QLA_D3_DiracPropagator *restrict a, int i_s, int j_c, int j_s, int n); void QLA_D3_V_xeq_colorvec_P ( QLA_D3_ColorVector *restrict r, QLA_D3_DiracPropagator *restrict a, int i_s, int j_c, int j_s, int *index, int n); void QLA_D3_V_veq_colorvec_pP ( QLA_D3_ColorVector *restrict r, QLA_D3_DiracPropagator *restrict *a, int i_s, int j_c, int j_s, int n); void QLA_D3_V_xeq_colorvec_pP ( QLA_D3_ColorVector 
*restrict r, QLA_D3_DiracPropagator *restrict *a, int i_s, int j_c, int j_s, int *index, int n); void QLA_D3_P_eq_colorvec_V ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorVector *restrict a, int i_s, int j_c, int j_s); void QLA_D3_P_veq_colorvec_V ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorVector *restrict a, int i_s, int j_c, int j_s, int n); void QLA_D3_P_xeq_colorvec_V ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorVector *restrict a, int i_s, int j_c, int j_s, int *index, int n); void QLA_D3_P_veq_colorvec_pV ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorVector *restrict *a, int i_s, int j_c, int j_s, int n); void QLA_D3_P_xeq_colorvec_pV ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorVector *restrict *a, int i_s, int j_c, int j_s, int *index, int n); # 908 "./qla-1.7.1/qla_d3.h" void QLA_D3_D_eq_diracvec_P ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracPropagator *restrict a, int j_c, int j_s); void QLA_D3_D_veq_diracvec_P ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracPropagator *restrict a, int j_c, int j_s, int n); void QLA_D3_D_xeq_diracvec_P ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracPropagator *restrict a, int j_c, int j_s, int *index, int n); void QLA_D3_D_veq_diracvec_pP ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracPropagator *restrict *a, int j_c, int j_s, int n); void QLA_D3_D_xeq_diracvec_pP ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracPropagator *restrict *a, int j_c, int j_s, int *index, int n); void QLA_D3_P_eq_diracvec_D ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracFermion *restrict a, int j_c, int j_s); void QLA_D3_P_veq_diracvec_D ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracFermion *restrict a, int j_c, int j_s, int n); void QLA_D3_P_xeq_diracvec_D ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracFermion *restrict a, int j_c, int j_s, int *index, int n); void QLA_D3_P_veq_diracvec_pD ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracFermion *restrict *a, int j_c, int j_s, int n); void 
QLA_D3_P_xeq_diracvec_pD ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracFermion *restrict *a, int j_c, int j_s, int *index, int n); void QLA_D3_R_eq_re_trace_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_R_veq_re_trace_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_R_xeq_re_trace_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_R_veq_re_trace_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_R_xeq_re_trace_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_R_eq_im_trace_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_R_veq_im_trace_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_R_xeq_im_trace_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_R_veq_im_trace_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_R_xeq_im_trace_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_C_eq_trace_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_C_veq_trace_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_C_xeq_trace_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_C_veq_trace_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_C_xeq_trace_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_M_eq_antiherm_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_veq_antiherm_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xeq_antiherm_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_veq_antiherm_pM ( QLA_D3_ColorMatrix *restrict r, 
QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xeq_antiherm_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_C_eq_det_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_C_veq_det_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_C_xeq_det_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_C_veq_det_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_C_xeq_det_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_M_eq_inverse_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_veq_inverse_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xeq_inverse_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_veq_inverse_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xeq_inverse_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_M_eq_sqrt_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_veq_sqrt_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xeq_sqrt_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_veq_sqrt_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xeq_sqrt_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_M_eq_invsqrt_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_veq_invsqrt_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xeq_invsqrt_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); 
void QLA_D3_M_veq_invsqrt_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xeq_invsqrt_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_M_eq_exp_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_veq_exp_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xeq_exp_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_veq_exp_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xeq_exp_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_M_eq_log_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_veq_log_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xeq_log_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_veq_log_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xeq_log_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_M_eq_spintrace_P ( QLA_D3_ColorMatrix *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_M_veq_spintrace_P ( QLA_D3_ColorMatrix *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_M_xeq_spintrace_P ( QLA_D3_ColorMatrix *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_M_veq_spintrace_pP ( QLA_D3_ColorMatrix *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_M_xeq_spintrace_pP ( QLA_D3_ColorMatrix *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); void QLA_D3_H_eq_spproj_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign ); void QLA_D3_H_veq_spproj_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_DiracFermion 
*restrict a, int mu, int sign , int n); void QLA_D3_H_xeq_spproj_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign , int *index, int n); void QLA_D3_H_veq_spproj_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int n); void QLA_D3_H_xeq_spproj_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int *index, int n); void QLA_D3_H_peq_spproj_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign ); void QLA_D3_H_vpeq_spproj_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign , int n); void QLA_D3_H_xpeq_spproj_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign , int *index, int n); void QLA_D3_H_vpeq_spproj_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int n); void QLA_D3_H_xpeq_spproj_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int *index, int n); void QLA_D3_H_eqm_spproj_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign ); void QLA_D3_H_veqm_spproj_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign , int n); void QLA_D3_H_xeqm_spproj_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign , int *index, int n); void QLA_D3_H_veqm_spproj_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int n); void QLA_D3_H_xeqm_spproj_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int *index, int n); void QLA_D3_H_meq_spproj_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign ); void QLA_D3_H_vmeq_spproj_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign , int n); void QLA_D3_H_xmeq_spproj_D ( QLA_D3_HalfFermion *restrict r, 
QLA_D3_DiracFermion *restrict a, int mu, int sign , int *index, int n); void QLA_D3_H_vmeq_spproj_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int n); void QLA_D3_H_xmeq_spproj_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int *index, int n); void QLA_D3_D_eq_sprecon_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_HalfFermion *restrict a, int mu, int sign ); void QLA_D3_D_veq_sprecon_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_HalfFermion *restrict a, int mu, int sign , int n); void QLA_D3_D_xeq_sprecon_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_HalfFermion *restrict a, int mu, int sign , int *index, int n); void QLA_D3_D_veq_sprecon_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int mu, int sign , int n); void QLA_D3_D_xeq_sprecon_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int mu, int sign , int *index, int n); void QLA_D3_D_peq_sprecon_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_HalfFermion *restrict a, int mu, int sign ); void QLA_D3_D_vpeq_sprecon_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_HalfFermion *restrict a, int mu, int sign , int n); void QLA_D3_D_xpeq_sprecon_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_HalfFermion *restrict a, int mu, int sign , int *index, int n); void QLA_D3_D_vpeq_sprecon_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int mu, int sign , int n); void QLA_D3_D_xpeq_sprecon_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int mu, int sign , int *index, int n); void QLA_D3_D_eqm_sprecon_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_HalfFermion *restrict a, int mu, int sign ); void QLA_D3_D_veqm_sprecon_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_HalfFermion *restrict a, int mu, int sign , int n); void QLA_D3_D_xeqm_sprecon_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_HalfFermion *restrict a, int mu, int sign , int *index, int n); void QLA_D3_D_veqm_sprecon_pH 
( QLA_D3_DiracFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int mu, int sign , int n); void QLA_D3_D_xeqm_sprecon_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int mu, int sign , int *index, int n); void QLA_D3_D_meq_sprecon_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_HalfFermion *restrict a, int mu, int sign ); void QLA_D3_D_vmeq_sprecon_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_HalfFermion *restrict a, int mu, int sign , int n); void QLA_D3_D_xmeq_sprecon_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_HalfFermion *restrict a, int mu, int sign , int *index, int n); void QLA_D3_D_vmeq_sprecon_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int mu, int sign , int n); void QLA_D3_D_xmeq_sprecon_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int mu, int sign , int *index, int n); void QLA_D3_D_eq_spproj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign ); void QLA_D3_D_veq_spproj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign , int n); void QLA_D3_D_xeq_spproj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign , int *index, int n); void QLA_D3_D_veq_spproj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int n); void QLA_D3_D_veq_spproj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign , int n); void QLA_D3_D_veq_spproj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int n); void QLA_D3_D_xeq_spproj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int *index, int n); void QLA_D3_D_xeq_spproj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign , int *index, int n); void QLA_D3_D_xeq_spproj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int 
*index, int n); void QLA_D3_D_peq_spproj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign ); void QLA_D3_D_vpeq_spproj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign , int n); void QLA_D3_D_xpeq_spproj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign , int *index, int n); void QLA_D3_D_vpeq_spproj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int n); void QLA_D3_D_vpeq_spproj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign , int n); void QLA_D3_D_vpeq_spproj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int n); void QLA_D3_D_xpeq_spproj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int *index, int n); void QLA_D3_D_xpeq_spproj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign , int *index, int n); void QLA_D3_D_xpeq_spproj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int *index, int n); void QLA_D3_D_eqm_spproj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign ); void QLA_D3_D_veqm_spproj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign , int n); void QLA_D3_D_xeqm_spproj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign , int *index, int n); void QLA_D3_D_veqm_spproj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int n); void QLA_D3_D_veqm_spproj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign , int n); void QLA_D3_D_veqm_spproj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int n); void QLA_D3_D_xeqm_spproj_pD ( QLA_D3_DiracFermion *restrict r, 
QLA_D3_DiracFermion *restrict *a, int mu, int sign , int *index, int n); void QLA_D3_D_xeqm_spproj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign , int *index, int n); void QLA_D3_D_xeqm_spproj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int *index, int n); void QLA_D3_D_meq_spproj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign ); void QLA_D3_D_vmeq_spproj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign , int n); void QLA_D3_D_xmeq_spproj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign , int *index, int n); void QLA_D3_D_vmeq_spproj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int n); void QLA_D3_D_vmeq_spproj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign , int n); void QLA_D3_D_vmeq_spproj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int n); void QLA_D3_D_xmeq_spproj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int *index, int n); void QLA_D3_D_xmeq_spproj_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int sign , int *index, int n); void QLA_D3_D_xmeq_spproj_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int sign , int *index, int n); void QLA_D3_H_eq_spproj_M_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign ); void QLA_D3_H_eq_spproj_Ma_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign ); void QLA_D3_H_veq_spproj_M_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void 
QLA_D3_H_veq_spproj_Ma_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_H_xeq_spproj_M_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_H_xeq_spproj_Ma_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_H_veq_spproj_pM_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_H_veq_spproj_pMa_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_H_veq_spproj_M_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_H_veq_spproj_Ma_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_H_veq_spproj_pM_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_H_veq_spproj_pMa_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_H_xeq_spproj_pM_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_H_xeq_spproj_pMa_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_H_xeq_spproj_M_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int 
*index, int n); void QLA_D3_H_xeq_spproj_Ma_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_H_xeq_spproj_pM_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_H_xeq_spproj_pMa_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_H_peq_spproj_M_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign ); void QLA_D3_H_peq_spproj_Ma_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign ); void QLA_D3_H_vpeq_spproj_M_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_H_vpeq_spproj_Ma_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_H_xpeq_spproj_M_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_H_xpeq_spproj_Ma_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_H_vpeq_spproj_pM_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_H_vpeq_spproj_pMa_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_H_vpeq_spproj_M_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int 
mu, int sign , int n); void QLA_D3_H_vpeq_spproj_Ma_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_H_vpeq_spproj_pM_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_H_vpeq_spproj_pMa_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_H_xpeq_spproj_pM_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_H_xpeq_spproj_pMa_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_H_xpeq_spproj_M_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_H_xpeq_spproj_Ma_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_H_xpeq_spproj_pM_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_H_xpeq_spproj_pMa_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_H_eqm_spproj_M_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign ); void QLA_D3_H_eqm_spproj_Ma_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign ); void QLA_D3_H_veqm_spproj_M_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, 
QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_H_veqm_spproj_Ma_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_H_xeqm_spproj_M_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_H_xeqm_spproj_Ma_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_H_veqm_spproj_pM_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_H_veqm_spproj_pMa_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_H_veqm_spproj_M_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_H_veqm_spproj_Ma_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_H_veqm_spproj_pM_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_H_veqm_spproj_pMa_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_H_xeqm_spproj_pM_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_H_xeqm_spproj_pMa_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_H_xeqm_spproj_M_times_pD ( QLA_D3_HalfFermion *restrict r, 
QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_H_xeqm_spproj_Ma_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_H_xeqm_spproj_pM_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_H_xeqm_spproj_pMa_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_H_meq_spproj_M_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign ); void QLA_D3_H_meq_spproj_Ma_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign ); void QLA_D3_H_vmeq_spproj_M_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_H_vmeq_spproj_Ma_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_H_xmeq_spproj_M_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_H_xmeq_spproj_Ma_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_H_vmeq_spproj_pM_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_H_vmeq_spproj_pMa_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_H_vmeq_spproj_M_times_pD ( 
QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_H_vmeq_spproj_Ma_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_H_vmeq_spproj_pM_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_H_vmeq_spproj_pMa_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_H_xmeq_spproj_pM_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_H_xmeq_spproj_pMa_times_D ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_H_xmeq_spproj_M_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_H_xmeq_spproj_Ma_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_H_xmeq_spproj_pM_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_H_xmeq_spproj_pMa_times_pD ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_eq_sprecon_M_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign ); void QLA_D3_D_eq_sprecon_Ma_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign ); void 
QLA_D3_D_veq_sprecon_M_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_veq_sprecon_Ma_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_xeq_sprecon_M_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_xeq_sprecon_Ma_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_veq_sprecon_pM_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_veq_sprecon_pMa_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_veq_sprecon_M_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_veq_sprecon_Ma_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_veq_sprecon_pM_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_veq_sprecon_pMa_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_xeq_sprecon_pM_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_xeq_sprecon_pMa_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int 
*index, int n); void QLA_D3_D_xeq_sprecon_M_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xeq_sprecon_Ma_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xeq_sprecon_pM_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xeq_sprecon_pMa_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_peq_sprecon_M_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign ); void QLA_D3_D_peq_sprecon_Ma_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign ); void QLA_D3_D_vpeq_sprecon_M_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_vpeq_sprecon_Ma_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_xpeq_sprecon_M_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_xpeq_sprecon_Ma_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_vpeq_sprecon_pM_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_vpeq_sprecon_pMa_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, 
QLA_D3_HalfFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_vpeq_sprecon_M_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_vpeq_sprecon_Ma_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_vpeq_sprecon_pM_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_vpeq_sprecon_pMa_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_xpeq_sprecon_pM_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_xpeq_sprecon_pMa_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_xpeq_sprecon_M_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xpeq_sprecon_Ma_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xpeq_sprecon_pM_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xpeq_sprecon_pMa_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_eqm_sprecon_M_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign ); void QLA_D3_D_eqm_sprecon_Ma_times_H ( 
QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign ); void QLA_D3_D_veqm_sprecon_M_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_veqm_sprecon_Ma_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_xeqm_sprecon_M_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_xeqm_sprecon_Ma_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_veqm_sprecon_pM_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_veqm_sprecon_pMa_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_veqm_sprecon_M_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_veqm_sprecon_Ma_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_veqm_sprecon_pM_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_veqm_sprecon_pMa_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_xeqm_sprecon_pM_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int *index, int n); void 
QLA_D3_D_xeqm_sprecon_pMa_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_xeqm_sprecon_M_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xeqm_sprecon_Ma_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xeqm_sprecon_pM_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xeqm_sprecon_pMa_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_meq_sprecon_M_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign ); void QLA_D3_D_meq_sprecon_Ma_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign ); void QLA_D3_D_vmeq_sprecon_M_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_vmeq_sprecon_Ma_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_xmeq_sprecon_M_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_xmeq_sprecon_Ma_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_vmeq_sprecon_pM_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, 
QLA_D3_HalfFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_vmeq_sprecon_pMa_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_vmeq_sprecon_M_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_vmeq_sprecon_Ma_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_vmeq_sprecon_pM_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_vmeq_sprecon_pMa_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_xmeq_sprecon_pM_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_xmeq_sprecon_pMa_times_H ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_xmeq_sprecon_M_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xmeq_sprecon_Ma_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xmeq_sprecon_pM_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xmeq_sprecon_pMa_times_pH ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_eq_spproj_M_times_D ( 
QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign ); void QLA_D3_D_eq_spproj_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign ); void QLA_D3_D_veq_spproj_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_veq_spproj_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_xeq_spproj_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_xeq_spproj_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_veq_spproj_pM_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_veq_spproj_pMa_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_veq_spproj_M_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_veq_spproj_Ma_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_veq_spproj_pM_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_veq_spproj_pMa_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_xeq_spproj_pM_times_D ( QLA_D3_DiracFermion 
*restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_xeq_spproj_pMa_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_xeq_spproj_M_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xeq_spproj_Ma_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xeq_spproj_pM_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xeq_spproj_pMa_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_peq_spproj_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign ); void QLA_D3_D_peq_spproj_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign ); void QLA_D3_D_vpeq_spproj_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_vpeq_spproj_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_xpeq_spproj_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_xpeq_spproj_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void 
QLA_D3_D_vpeq_spproj_pM_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_vpeq_spproj_pMa_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_vpeq_spproj_M_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_vpeq_spproj_Ma_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_vpeq_spproj_pM_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_vpeq_spproj_pMa_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_xpeq_spproj_pM_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_xpeq_spproj_pMa_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_xpeq_spproj_M_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xpeq_spproj_Ma_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xpeq_spproj_pM_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xpeq_spproj_pMa_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, 
QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_eqm_spproj_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign ); void QLA_D3_D_eqm_spproj_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign ); void QLA_D3_D_veqm_spproj_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_veqm_spproj_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_xeqm_spproj_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_xeqm_spproj_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_veqm_spproj_pM_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_veqm_spproj_pMa_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_veqm_spproj_M_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_veqm_spproj_Ma_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_veqm_spproj_pM_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_veqm_spproj_pMa_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, 
QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_xeqm_spproj_pM_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_xeqm_spproj_pMa_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_xeqm_spproj_M_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xeqm_spproj_Ma_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xeqm_spproj_pM_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xeqm_spproj_pMa_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_meq_spproj_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign ); void QLA_D3_D_meq_spproj_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign ); void QLA_D3_D_vmeq_spproj_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_vmeq_spproj_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_xmeq_spproj_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_xmeq_spproj_Ma_times_D ( 
QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_vmeq_spproj_pM_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_vmeq_spproj_pMa_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int n); void QLA_D3_D_vmeq_spproj_M_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_vmeq_spproj_Ma_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_vmeq_spproj_pM_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_vmeq_spproj_pMa_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n); void QLA_D3_D_xmeq_spproj_pM_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_xmeq_spproj_pMa_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int mu, int sign , int *index, int n); void QLA_D3_D_xmeq_spproj_M_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xmeq_spproj_Ma_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); void QLA_D3_D_xmeq_spproj_pM_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int 
sign , int *index, int n); void QLA_D3_D_xmeq_spproj_pMa_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int *index, int n); # 1338 "./qla-1.7.1/qla_d3.h" void QLA_D3_V_eq_r_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_veq_r_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xeq_r_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_veq_r_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xeq_r_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_eq_r_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_veq_r_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xeq_r_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_veq_r_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xeq_r_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_eq_r_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_veq_r_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xeq_r_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_veq_r_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real 
*restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xeq_r_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_M_eq_r_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_veq_r_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xeq_r_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_veq_r_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xeq_r_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_eq_r_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veq_r_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeq_r_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veq_r_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeq_r_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_V_peq_r_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_vpeq_r_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xpeq_r_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_vpeq_r_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real 
*restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xpeq_r_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_peq_r_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_vpeq_r_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xpeq_r_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_vpeq_r_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xpeq_r_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_peq_r_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_vpeq_r_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xpeq_r_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_vpeq_r_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xpeq_r_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_M_peq_r_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_vpeq_r_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xpeq_r_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_vpeq_r_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, 
QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xpeq_r_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_peq_r_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vpeq_r_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xpeq_r_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vpeq_r_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xpeq_r_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_V_eqm_r_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_veqm_r_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xeqm_r_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_veqm_r_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xeqm_r_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_eqm_r_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_veqm_r_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xeqm_r_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_veqm_r_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real 
*restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xeqm_r_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_eqm_r_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_veqm_r_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xeqm_r_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_veqm_r_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xeqm_r_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_M_eqm_r_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_veqm_r_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xeqm_r_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_veqm_r_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xeqm_r_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_eqm_r_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veqm_r_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeqm_r_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veqm_r_times_pP ( QLA_D3_DiracPropagator *restrict r, 
QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeqm_r_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_V_meq_r_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_vmeq_r_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xmeq_r_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_vmeq_r_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xmeq_r_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_meq_r_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_vmeq_r_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xmeq_r_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_vmeq_r_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xmeq_r_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_meq_r_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_vmeq_r_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xmeq_r_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_vmeq_r_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real 
*restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xmeq_r_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_M_meq_r_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_vmeq_r_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xmeq_r_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_vmeq_r_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xmeq_r_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_meq_r_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vmeq_r_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xmeq_r_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vmeq_r_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xmeq_r_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); # 1523 "./qla-1.7.1/qla_d3.h" void QLA_D3_V_eq_c_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_veq_c_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xeq_c_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_veq_c_times_pV ( 
QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xeq_c_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_eq_c_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_veq_c_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xeq_c_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_veq_c_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xeq_c_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_eq_c_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_veq_c_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xeq_c_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_veq_c_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xeq_c_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_M_eq_c_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_veq_c_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xeq_c_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void 
QLA_D3_M_veq_c_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xeq_c_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_eq_c_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veq_c_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeq_c_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veq_c_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeq_c_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_V_peq_c_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_vpeq_c_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xpeq_c_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_vpeq_c_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xpeq_c_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_peq_c_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_vpeq_c_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xpeq_c_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, 
QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_vpeq_c_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xpeq_c_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_peq_c_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_vpeq_c_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xpeq_c_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_vpeq_c_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xpeq_c_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_M_peq_c_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_vpeq_c_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xpeq_c_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_vpeq_c_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xpeq_c_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_peq_c_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vpeq_c_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xpeq_c_times_P ( QLA_D3_DiracPropagator 
*restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vpeq_c_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xpeq_c_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_V_eqm_c_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_veqm_c_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xeqm_c_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_veqm_c_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xeqm_c_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_eqm_c_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_veqm_c_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xeqm_c_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_veqm_c_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xeqm_c_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_eqm_c_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_veqm_c_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, int n); void 
QLA_D3_D_xeqm_c_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_veqm_c_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xeqm_c_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_M_eqm_c_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_veqm_c_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xeqm_c_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_veqm_c_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xeqm_c_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_eqm_c_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veqm_c_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeqm_c_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veqm_c_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeqm_c_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_V_meq_c_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_vmeq_c_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex 
*restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xmeq_c_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_vmeq_c_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xmeq_c_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_meq_c_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_vmeq_c_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xmeq_c_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_vmeq_c_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xmeq_c_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_meq_c_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_vmeq_c_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xmeq_c_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_vmeq_c_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xmeq_c_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_M_meq_c_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_vmeq_c_times_M ( QLA_D3_ColorMatrix 
*restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xmeq_c_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_vmeq_c_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xmeq_c_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_meq_c_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vmeq_c_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xmeq_c_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vmeq_c_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xmeq_c_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); # 1708 "./qla-1.7.1/qla_d3.h" void QLA_D3_V_eq_i_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a); void QLA_D3_V_veq_i_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int n); void QLA_D3_V_xeq_i_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int *index, int n); void QLA_D3_V_veq_i_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int n); void QLA_D3_V_xeq_i_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int *index, int n); void QLA_D3_H_eq_i_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a); void QLA_D3_H_veq_i_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int n); void QLA_D3_H_xeq_i_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion 
*restrict a, int *index, int n); void QLA_D3_H_veq_i_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int n); void QLA_D3_H_xeq_i_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int *index, int n); void QLA_D3_D_eq_i_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a); void QLA_D3_D_veq_i_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int n); void QLA_D3_D_xeq_i_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int *index, int n); void QLA_D3_D_veq_i_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int n); void QLA_D3_D_xeq_i_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int *index, int n); void QLA_D3_M_eq_i_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_veq_i_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xeq_i_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_veq_i_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xeq_i_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_P_eq_i_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_P_veq_i_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_P_xeq_i_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_P_veq_i_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_P_xeq_i_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); void QLA_D3_V_peq_i_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a); void QLA_D3_V_vpeq_i_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int n); void QLA_D3_V_xpeq_i_V 
( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int *index, int n); void QLA_D3_V_vpeq_i_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int n); void QLA_D3_V_xpeq_i_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int *index, int n); void QLA_D3_H_peq_i_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a); void QLA_D3_H_vpeq_i_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int n); void QLA_D3_H_xpeq_i_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int *index, int n); void QLA_D3_H_vpeq_i_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int n); void QLA_D3_H_xpeq_i_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int *index, int n); void QLA_D3_D_peq_i_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a); void QLA_D3_D_vpeq_i_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int n); void QLA_D3_D_xpeq_i_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int *index, int n); void QLA_D3_D_vpeq_i_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int n); void QLA_D3_D_xpeq_i_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int *index, int n); void QLA_D3_M_peq_i_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_vpeq_i_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xpeq_i_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_vpeq_i_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xpeq_i_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_P_peq_i_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_P_vpeq_i_P ( QLA_D3_DiracPropagator *restrict r, 
QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_P_xpeq_i_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_P_vpeq_i_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_P_xpeq_i_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); void QLA_D3_V_eqm_i_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a); void QLA_D3_V_veqm_i_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int n); void QLA_D3_V_xeqm_i_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int *index, int n); void QLA_D3_V_veqm_i_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int n); void QLA_D3_V_xeqm_i_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int *index, int n); void QLA_D3_H_eqm_i_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a); void QLA_D3_H_veqm_i_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int n); void QLA_D3_H_xeqm_i_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int *index, int n); void QLA_D3_H_veqm_i_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int n); void QLA_D3_H_xeqm_i_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int *index, int n); void QLA_D3_D_eqm_i_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a); void QLA_D3_D_veqm_i_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int n); void QLA_D3_D_xeqm_i_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int *index, int n); void QLA_D3_D_veqm_i_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int n); void QLA_D3_D_xeqm_i_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int *index, int n); void QLA_D3_M_eqm_i_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict 
a); void QLA_D3_M_veqm_i_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xeqm_i_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_veqm_i_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xeqm_i_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_P_eqm_i_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_P_veqm_i_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_P_xeqm_i_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_P_veqm_i_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_P_xeqm_i_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); void QLA_D3_V_meq_i_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a); void QLA_D3_V_vmeq_i_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int n); void QLA_D3_V_xmeq_i_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int *index, int n); void QLA_D3_V_vmeq_i_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int n); void QLA_D3_V_xmeq_i_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int *index, int n); void QLA_D3_H_meq_i_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a); void QLA_D3_H_vmeq_i_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int n); void QLA_D3_H_xmeq_i_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int *index, int n); void QLA_D3_H_vmeq_i_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int n); void QLA_D3_H_xmeq_i_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int *index, int n); void QLA_D3_D_meq_i_D ( 
QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a); void QLA_D3_D_vmeq_i_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int n); void QLA_D3_D_xmeq_i_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int *index, int n); void QLA_D3_D_vmeq_i_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int n); void QLA_D3_D_xmeq_i_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int *index, int n); void QLA_D3_M_meq_i_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_vmeq_i_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xmeq_i_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_M_vmeq_i_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_M_xmeq_i_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_P_meq_i_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_P_vmeq_i_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_P_xmeq_i_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_P_vmeq_i_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_P_xmeq_i_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); # 1893 "./qla-1.7.1/qla_d3.h" void QLA_D3_D_eq_gamma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu); void QLA_D3_D_veq_gamma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int n); void QLA_D3_D_xeq_gamma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int mu, int *index, int n); void QLA_D3_D_veq_gamma_times_pD ( QLA_D3_DiracFermion *restrict r, 
QLA_D3_DiracFermion *restrict *a, int mu, int n); void QLA_D3_D_xeq_gamma_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int mu, int *index, int n); void QLA_D3_P_eq_gamma_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int mu); void QLA_D3_P_veq_gamma_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int mu, int n); void QLA_D3_P_xeq_gamma_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int mu, int *index, int n); void QLA_D3_P_veq_gamma_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int mu, int n); void QLA_D3_P_xeq_gamma_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int mu, int *index, int n); # 1916 "./qla-1.7.1/qla_d3.h" void QLA_D3_P_eq_P_times_gamma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int mu); void QLA_D3_P_veq_P_times_gamma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int mu, int n); void QLA_D3_P_xeq_P_times_gamma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int mu, int *index, int n); void QLA_D3_P_veq_pP_times_gamma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int mu, int n); void QLA_D3_P_xeq_pP_times_gamma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int mu, int *index, int n); # 1935 "./qla-1.7.1/qla_d3.h" void QLA_D3_V_eq_V_plus_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_veq_V_plus_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xeq_V_plus_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_veq_pV_plus_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, 
QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_veq_V_plus_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_veq_pV_plus_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xeq_pV_plus_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_xeq_V_plus_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_V_xeq_pV_plus_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_V_eq_V_minus_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_veq_V_minus_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xeq_V_minus_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_veq_pV_minus_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_veq_V_minus_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_veq_pV_minus_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xeq_pV_minus_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_xeq_V_minus_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_V_xeq_pV_minus_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, 
int *index, int n); void QLA_D3_H_eq_H_plus_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_veq_H_plus_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xeq_H_plus_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_veq_pH_plus_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_veq_H_plus_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_veq_pH_plus_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xeq_pH_plus_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_xeq_H_plus_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_H_xeq_pH_plus_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_H_eq_H_minus_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_veq_H_minus_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xeq_H_minus_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_veq_pH_minus_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_veq_H_minus_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_veq_pH_minus_pH ( 
QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xeq_pH_minus_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_xeq_H_minus_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_H_xeq_pH_minus_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_eq_D_plus_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_veq_D_plus_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xeq_D_plus_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_veq_pD_plus_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_veq_D_plus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_veq_pD_plus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xeq_pD_plus_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_xeq_D_plus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_D_xeq_pD_plus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_D_eq_D_minus_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_veq_D_minus_D ( 
QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xeq_D_minus_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_veq_pD_minus_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_veq_D_minus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_veq_pD_minus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xeq_pD_minus_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_xeq_D_minus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_D_xeq_pD_minus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_M_eq_M_plus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_veq_M_plus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xeq_M_plus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_veq_pM_plus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_veq_M_plus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_veq_pM_plus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xeq_pM_plus_M ( QLA_D3_ColorMatrix *restrict 
r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xeq_M_plus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xeq_pM_plus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_eq_M_minus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_veq_M_minus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xeq_M_minus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_veq_pM_minus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_veq_M_minus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_veq_pM_minus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xeq_pM_minus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xeq_M_minus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xeq_pM_minus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_eq_P_plus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veq_P_plus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeq_P_plus_P ( QLA_D3_DiracPropagator *restrict r, 
QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veq_pP_plus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_veq_P_plus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_veq_pP_plus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeq_pP_plus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xeq_P_plus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xeq_pP_plus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_eq_P_minus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veq_P_minus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeq_P_minus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veq_pP_minus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_veq_P_minus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_veq_pP_minus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeq_pP_minus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator 
*restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xeq_P_minus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xeq_pP_minus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); # 2070 "./qla-1.7.1/qla_d3.h" void QLA_D3_V_eq_R_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_veq_R_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xeq_R_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_veq_pR_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_veq_R_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_veq_pR_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xeq_pR_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_xeq_R_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_V_xeq_pR_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_eq_R_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_veq_R_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xeq_R_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void 
QLA_D3_H_veq_pR_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_veq_R_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_veq_pR_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xeq_pR_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_xeq_R_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_H_xeq_pR_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_eq_R_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_veq_R_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xeq_R_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_veq_pR_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_veq_R_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_veq_pR_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xeq_pR_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_xeq_R_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_D_xeq_pR_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracFermion *restrict *b, int 
*index, int n); void QLA_D3_M_eq_R_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_veq_R_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xeq_R_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_veq_pR_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_veq_R_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_veq_pR_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xeq_pR_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xeq_R_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xeq_pR_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_eq_R_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veq_R_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeq_R_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veq_pR_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_veq_R_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_veq_pR_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracPropagator 
*restrict *b, int n); void QLA_D3_P_xeq_pR_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xeq_R_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xeq_pR_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_V_peq_R_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_vpeq_R_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xpeq_R_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_vpeq_pR_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_vpeq_R_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_vpeq_pR_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xpeq_pR_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_xpeq_R_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_V_xpeq_pR_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_peq_R_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_vpeq_R_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xpeq_R_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, 
QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_vpeq_pR_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_vpeq_R_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_vpeq_pR_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xpeq_pR_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_xpeq_R_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_H_xpeq_pR_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_peq_R_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_vpeq_R_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xpeq_R_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_vpeq_pR_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_vpeq_R_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_vpeq_pR_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xpeq_pR_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_xpeq_R_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_D_xpeq_pR_times_pD ( QLA_D3_DiracFermion 
*restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_M_peq_R_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_vpeq_R_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xpeq_R_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_vpeq_pR_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_vpeq_R_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_vpeq_pR_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xpeq_pR_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xpeq_R_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xpeq_pR_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_peq_R_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vpeq_R_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xpeq_R_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vpeq_pR_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_vpeq_R_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void 
QLA_D3_P_vpeq_pR_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xpeq_pR_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xpeq_R_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xpeq_pR_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_V_eqm_R_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_veqm_R_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xeqm_R_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_veqm_pR_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_veqm_R_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_veqm_pR_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xeqm_pR_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_xeqm_R_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_V_xeqm_pR_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_eqm_R_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_veqm_R_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, 
QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xeqm_R_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_veqm_pR_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_veqm_R_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_veqm_pR_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xeqm_pR_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_xeqm_R_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_H_xeqm_pR_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_eqm_R_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_veqm_R_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xeqm_R_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_veqm_pR_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_veqm_R_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_veqm_pR_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xeqm_pR_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_xeqm_R_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real 
*restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_D_xeqm_pR_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_M_eqm_R_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_veqm_R_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xeqm_R_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_veqm_pR_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_veqm_R_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_veqm_pR_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xeqm_pR_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xeqm_R_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xeqm_pR_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_eqm_R_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veqm_R_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeqm_R_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veqm_pR_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_veqm_R_times_pP ( 
QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_veqm_pR_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeqm_pR_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xeqm_R_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xeqm_pR_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_V_meq_R_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_vmeq_R_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xmeq_R_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_vmeq_pR_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_vmeq_R_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_vmeq_pR_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xmeq_pR_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_xmeq_R_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_V_xmeq_pR_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_meq_R_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion 
*restrict b); void QLA_D3_H_vmeq_R_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xmeq_R_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_vmeq_pR_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_vmeq_R_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_vmeq_pR_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xmeq_pR_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_xmeq_R_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_H_xmeq_pR_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_meq_R_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_vmeq_R_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xmeq_R_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_vmeq_pR_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_vmeq_R_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_vmeq_pR_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xmeq_pR_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracFermion *restrict 
b, int *index, int n); void QLA_D3_D_xmeq_R_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_D_xmeq_pR_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_M_meq_R_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_vmeq_R_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xmeq_R_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_vmeq_pR_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_vmeq_R_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_vmeq_pR_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xmeq_pR_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xmeq_R_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xmeq_pR_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_meq_R_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vmeq_R_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xmeq_R_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vmeq_pR_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real 
*restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_vmeq_R_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_vmeq_pR_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xmeq_pR_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xmeq_R_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xmeq_pR_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); # 2335 "./qla-1.7.1/qla_d3.h" void QLA_D3_M_eq_M_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_veq_M_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xeq_M_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_veq_pM_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_veq_M_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_veq_pM_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xeq_pM_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xeq_M_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xeq_pM_times_pM ( QLA_D3_ColorMatrix *restrict r, 
QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_eq_M_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_veq_M_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xeq_M_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_veq_pM_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_veq_M_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_veq_pM_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xeq_pM_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xeq_M_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xeq_pM_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_eq_Ma_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_veq_Ma_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xeq_Ma_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_veq_pMa_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_veq_Ma_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, 
QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_veq_pMa_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xeq_pMa_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xeq_Ma_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xeq_pMa_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_eq_Ma_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_veq_Ma_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xeq_Ma_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_veq_pMa_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_veq_Ma_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_veq_pMa_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xeq_pMa_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xeq_Ma_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xeq_pMa_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_eq_P_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator 
*restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veq_P_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeq_P_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veq_pP_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_veq_P_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_veq_pP_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeq_pP_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xeq_P_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xeq_pP_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_eq_P_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veq_P_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeq_P_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veq_pP_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_veq_P_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, 
QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_veq_pP_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeq_pP_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xeq_P_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xeq_pP_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_eq_Pa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veq_Pa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeq_Pa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veq_pPa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_veq_Pa_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_veq_pPa_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeq_pPa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xeq_Pa_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xeq_pPa_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict 
*a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_eq_Pa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veq_Pa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeq_Pa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veq_pPa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_veq_Pa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_veq_pPa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeq_pPa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xeq_Pa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xeq_pPa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_M_peq_M_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_vpeq_M_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xpeq_M_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_vpeq_pM_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void 
QLA_D3_M_vpeq_M_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_vpeq_pM_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xpeq_pM_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xpeq_M_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xpeq_pM_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_peq_M_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_vpeq_M_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xpeq_M_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_vpeq_pM_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_vpeq_M_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_vpeq_pM_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xpeq_pM_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xpeq_M_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xpeq_pM_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); 
void QLA_D3_M_peq_Ma_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_vpeq_Ma_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xpeq_Ma_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_vpeq_pMa_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_vpeq_Ma_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_vpeq_pMa_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xpeq_pMa_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xpeq_Ma_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xpeq_pMa_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_peq_Ma_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_vpeq_Ma_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xpeq_Ma_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_vpeq_pMa_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_vpeq_Ma_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void 
QLA_D3_M_vpeq_pMa_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xpeq_pMa_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xpeq_Ma_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xpeq_pMa_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_peq_P_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vpeq_P_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xpeq_P_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vpeq_pP_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_vpeq_P_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_vpeq_pP_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xpeq_pP_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xpeq_P_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xpeq_pP_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_peq_P_times_Pa 
( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vpeq_P_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xpeq_P_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vpeq_pP_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_vpeq_P_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_vpeq_pP_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xpeq_pP_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xpeq_P_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xpeq_pP_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_peq_Pa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vpeq_Pa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xpeq_Pa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vpeq_pPa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_vpeq_Pa_times_pP ( 
QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_vpeq_pPa_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xpeq_pPa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xpeq_Pa_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xpeq_pPa_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_peq_Pa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vpeq_Pa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xpeq_Pa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vpeq_pPa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_vpeq_Pa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_vpeq_pPa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xpeq_pPa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xpeq_Pa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void 
QLA_D3_P_xpeq_pPa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_M_eqm_M_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_veqm_M_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xeqm_M_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_veqm_pM_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_veqm_M_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_veqm_pM_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xeqm_pM_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xeqm_M_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xeqm_pM_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_eqm_M_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_veqm_M_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xeqm_M_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_veqm_pM_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void 
QLA_D3_M_veqm_M_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_veqm_pM_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xeqm_pM_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xeqm_M_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xeqm_pM_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_eqm_Ma_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_veqm_Ma_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xeqm_Ma_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_veqm_pMa_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_veqm_Ma_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_veqm_pMa_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xeqm_pMa_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xeqm_Ma_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xeqm_pMa_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, 
int n); void QLA_D3_M_eqm_Ma_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_veqm_Ma_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xeqm_Ma_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_veqm_pMa_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_veqm_Ma_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_veqm_pMa_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xeqm_pMa_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xeqm_Ma_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xeqm_pMa_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_eqm_P_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veqm_P_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeqm_P_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veqm_pP_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_veqm_P_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, 
QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_veqm_pP_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeqm_pP_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xeqm_P_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xeqm_pP_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_eqm_P_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veqm_P_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeqm_P_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veqm_pP_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_veqm_P_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_veqm_pP_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeqm_pP_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xeqm_P_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xeqm_pP_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator 
*restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_eqm_Pa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veqm_Pa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeqm_Pa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veqm_pPa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_veqm_Pa_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_veqm_pPa_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeqm_pPa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xeqm_Pa_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xeqm_pPa_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_eqm_Pa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veqm_Pa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeqm_Pa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veqm_pPa_times_Pa ( QLA_D3_DiracPropagator *restrict r, 
QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_veqm_Pa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_veqm_pPa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeqm_pPa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xeqm_Pa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xeqm_pPa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_M_meq_M_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_vmeq_M_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xmeq_M_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_vmeq_pM_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_vmeq_M_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_vmeq_pM_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xmeq_pM_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xmeq_M_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); 
void QLA_D3_M_xmeq_pM_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_meq_M_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_vmeq_M_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xmeq_M_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_vmeq_pM_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_vmeq_M_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_vmeq_pM_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xmeq_pM_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xmeq_M_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xmeq_pM_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_meq_Ma_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_vmeq_Ma_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xmeq_Ma_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_vmeq_pMa_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void 
QLA_D3_M_vmeq_Ma_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_vmeq_pMa_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xmeq_pMa_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xmeq_Ma_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xmeq_pMa_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_meq_Ma_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_vmeq_Ma_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xmeq_Ma_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_vmeq_pMa_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_vmeq_Ma_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_vmeq_pMa_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xmeq_pMa_times_Ma ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xmeq_Ma_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xmeq_pMa_times_pMa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int 
*index, int n); void QLA_D3_P_meq_P_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vmeq_P_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xmeq_P_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vmeq_pP_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_vmeq_P_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_vmeq_pP_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xmeq_pP_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xmeq_P_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xmeq_pP_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_meq_P_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vmeq_P_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xmeq_P_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vmeq_pP_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void 
QLA_D3_P_vmeq_P_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_vmeq_pP_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xmeq_pP_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xmeq_P_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xmeq_pP_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_meq_Pa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vmeq_Pa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xmeq_Pa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vmeq_pPa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_vmeq_Pa_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_vmeq_pPa_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xmeq_pPa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xmeq_Pa_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int 
*index, int n); void QLA_D3_P_xmeq_pPa_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_meq_Pa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vmeq_Pa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xmeq_Pa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vmeq_pPa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_vmeq_Pa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_vmeq_pPa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xmeq_pPa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xmeq_Pa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xmeq_pPa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); # 2756 "./qla-1.7.1/qla_d3.h" void QLA_D3_V_eq_C_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_veq_C_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xeq_C_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); 
void QLA_D3_V_veq_pC_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_veq_C_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_veq_pC_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xeq_pC_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_xeq_C_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_V_xeq_pC_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_eq_C_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_veq_C_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xeq_C_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_veq_pC_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_veq_C_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_veq_pC_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xeq_pC_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_xeq_C_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_H_xeq_pC_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict *a, 
QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_eq_C_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_veq_C_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xeq_C_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_veq_pC_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_veq_C_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_veq_pC_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xeq_pC_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_xeq_C_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_D_xeq_pC_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_M_eq_C_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_veq_C_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xeq_C_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_veq_pC_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_veq_C_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_veq_pC_times_pM ( QLA_D3_ColorMatrix 
*restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xeq_pC_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xeq_C_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xeq_pC_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_eq_C_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veq_C_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeq_C_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veq_pC_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_veq_C_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_veq_pC_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeq_pC_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xeq_C_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xeq_pC_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_V_peq_C_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_vpeq_C_times_V ( QLA_D3_ColorVector *restrict r, 
QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xpeq_C_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_vpeq_pC_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_vpeq_C_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_vpeq_pC_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xpeq_pC_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_xpeq_C_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_V_xpeq_pC_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_peq_C_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_vpeq_C_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xpeq_C_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_vpeq_pC_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_vpeq_C_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_vpeq_pC_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xpeq_pC_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void 
QLA_D3_H_xpeq_C_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_H_xpeq_pC_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_peq_C_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_vpeq_C_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xpeq_C_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_vpeq_pC_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_vpeq_C_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_vpeq_pC_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xpeq_pC_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_xpeq_C_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_D_xpeq_pC_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_M_peq_C_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_vpeq_C_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xpeq_C_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_vpeq_pC_times_M ( QLA_D3_ColorMatrix *restrict r, 
QLA_D_Complex *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_vpeq_C_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_vpeq_pC_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xpeq_pC_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xpeq_C_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xpeq_pC_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_peq_C_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vpeq_C_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xpeq_C_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vpeq_pC_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_vpeq_C_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_vpeq_pC_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xpeq_pC_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xpeq_C_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xpeq_pC_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex 
*restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_V_eqm_C_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_veqm_C_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xeqm_C_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_veqm_pC_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_veqm_C_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_veqm_pC_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xeqm_pC_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_xeqm_C_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_V_xeqm_pC_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_eqm_C_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_veqm_C_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xeqm_C_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_veqm_pC_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_veqm_C_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_veqm_pC_times_pH ( 
QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xeqm_pC_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_xeqm_C_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_H_xeqm_pC_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_eqm_C_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_veqm_C_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xeqm_C_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_veqm_pC_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_veqm_C_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_veqm_pC_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xeqm_pC_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_xeqm_C_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_D_xeqm_pC_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_M_eqm_C_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_veqm_C_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, 
QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xeqm_C_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_veqm_pC_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_veqm_C_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_veqm_pC_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xeqm_pC_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xeqm_C_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xeqm_pC_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_eqm_C_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veqm_C_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeqm_C_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veqm_pC_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_veqm_C_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_veqm_pC_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeqm_pC_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, 
int n); void QLA_D3_P_xeqm_C_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xeqm_pC_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_V_meq_C_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_vmeq_C_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xmeq_C_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_vmeq_pC_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_vmeq_C_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_vmeq_pC_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xmeq_pC_times_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_xmeq_C_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_V_xmeq_pC_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_meq_C_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_vmeq_C_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xmeq_C_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_vmeq_pC_times_H ( QLA_D3_HalfFermion *restrict r, 
QLA_D_Complex *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_vmeq_C_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_vmeq_pC_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xmeq_pC_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_xmeq_C_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_H_xmeq_pC_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_meq_C_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_vmeq_C_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xmeq_C_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_vmeq_pC_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_vmeq_C_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_vmeq_pC_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xmeq_pC_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_xmeq_C_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_D_xmeq_pC_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracFermion *restrict *b, int 
*index, int n); void QLA_D3_M_meq_C_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_M_vmeq_C_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_xmeq_C_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_vmeq_pC_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_M_vmeq_C_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_vmeq_pC_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_M_xmeq_pC_times_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_M_xmeq_C_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_M_xmeq_pC_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_meq_C_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vmeq_C_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xmeq_C_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vmeq_pC_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_vmeq_C_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_vmeq_pC_times_pP ( QLA_D3_DiracPropagator 
*restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xmeq_pC_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xmeq_C_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xmeq_pC_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); # 3021 "./qla-1.7.1/qla_d3.h" void QLA_D3_V_eq_M_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_veq_M_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xeq_M_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_veq_pM_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_veq_M_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_veq_pM_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xeq_pM_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_xeq_M_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_V_xeq_pM_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_eq_M_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_veq_M_times_H ( 
QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xeq_M_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_veq_pM_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_veq_M_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_veq_pM_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xeq_pM_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_xeq_M_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_H_xeq_pM_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_eq_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_veq_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xeq_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_veq_pM_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_veq_M_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_veq_pM_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xeq_pM_times_D ( QLA_D3_DiracFermion *restrict r, 
QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_xeq_M_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_D_xeq_pM_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_V_eq_Ma_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_veq_Ma_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xeq_Ma_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_veq_pMa_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_veq_Ma_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_veq_pMa_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xeq_pMa_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_xeq_Ma_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_V_xeq_pMa_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_eq_Ma_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_veq_Ma_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xeq_Ma_times_H ( QLA_D3_HalfFermion *restrict r, 
QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_veq_pMa_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_veq_Ma_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_veq_pMa_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xeq_pMa_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_xeq_Ma_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_H_xeq_pMa_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_eq_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_veq_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xeq_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_veq_pMa_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_veq_Ma_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_veq_pMa_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xeq_pMa_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_xeq_Ma_times_pD ( QLA_D3_DiracFermion *restrict r, 
QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_D_xeq_pMa_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_V_peq_M_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_vpeq_M_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xpeq_M_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_vpeq_pM_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_vpeq_M_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_vpeq_pM_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xpeq_pM_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_xpeq_M_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_V_xpeq_pM_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_peq_M_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_vpeq_M_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xpeq_M_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_vpeq_pM_times_H ( QLA_D3_HalfFermion *restrict r, 
QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_vpeq_M_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_vpeq_pM_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xpeq_pM_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_xpeq_M_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_H_xpeq_pM_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_peq_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_vpeq_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xpeq_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_vpeq_pM_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_vpeq_M_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_vpeq_pM_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xpeq_pM_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_xpeq_M_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_D_xpeq_pM_times_pD ( QLA_D3_DiracFermion *restrict r, 
QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_V_peq_Ma_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_vpeq_Ma_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xpeq_Ma_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_vpeq_pMa_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_vpeq_Ma_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_vpeq_pMa_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xpeq_pMa_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_xpeq_Ma_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_V_xpeq_pMa_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_peq_Ma_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_vpeq_Ma_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xpeq_Ma_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_vpeq_pMa_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_vpeq_Ma_times_pH ( QLA_D3_HalfFermion *restrict r, 
QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_vpeq_pMa_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xpeq_pMa_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_xpeq_Ma_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_H_xpeq_pMa_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_peq_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_vpeq_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xpeq_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_vpeq_pMa_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_vpeq_Ma_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_vpeq_pMa_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xpeq_pMa_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_xpeq_Ma_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_D_xpeq_pMa_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_V_eqm_M_times_V ( 
QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_veqm_M_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xeqm_M_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_veqm_pM_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_veqm_M_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_veqm_pM_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xeqm_pM_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_xeqm_M_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_V_xeqm_pM_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_eqm_M_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_veqm_M_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xeqm_M_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_veqm_pM_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_veqm_M_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_veqm_pM_times_pH ( QLA_D3_HalfFermion *restrict r, 
QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xeqm_pM_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_xeqm_M_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_H_xeqm_pM_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_eqm_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_veqm_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xeqm_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_veqm_pM_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_veqm_M_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_veqm_pM_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xeqm_pM_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_xeqm_M_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_D_xeqm_pM_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_V_eqm_Ma_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_veqm_Ma_times_V ( QLA_D3_ColorVector 
*restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xeqm_Ma_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_veqm_pMa_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_veqm_Ma_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_veqm_pMa_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xeqm_pMa_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_xeqm_Ma_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_V_xeqm_pMa_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_eqm_Ma_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_veqm_Ma_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xeqm_Ma_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_veqm_pMa_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_veqm_Ma_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_veqm_pMa_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xeqm_pMa_times_H ( QLA_D3_HalfFermion *restrict r, 
QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_xeqm_Ma_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_H_xeqm_pMa_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_eqm_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_veqm_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xeqm_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_veqm_pMa_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_veqm_Ma_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_veqm_pMa_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xeqm_pMa_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_xeqm_Ma_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_D_xeqm_pMa_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_V_meq_M_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_vmeq_M_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xmeq_M_times_V ( QLA_D3_ColorVector 
*restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_vmeq_pM_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_vmeq_M_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_vmeq_pM_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xmeq_pM_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_xmeq_M_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_V_xmeq_pM_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_meq_M_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_vmeq_M_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xmeq_M_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_vmeq_pM_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_vmeq_M_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_vmeq_pM_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xmeq_pM_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_xmeq_M_times_pH ( QLA_D3_HalfFermion *restrict r, 
QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_H_xmeq_pM_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_meq_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_vmeq_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xmeq_M_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_vmeq_pM_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_vmeq_M_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_vmeq_pM_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xmeq_pM_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_xmeq_M_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_D_xmeq_pM_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_V_meq_Ma_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_V_vmeq_Ma_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_xmeq_Ma_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_vmeq_pMa_times_V ( QLA_D3_ColorVector *restrict 
r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_V_vmeq_Ma_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_vmeq_pMa_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_V_xmeq_pMa_times_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_V_xmeq_Ma_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_V_xmeq_pMa_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_H_meq_Ma_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_H_vmeq_Ma_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_xmeq_Ma_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_vmeq_pMa_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_H_vmeq_Ma_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_vmeq_pMa_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_H_xmeq_pMa_times_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_H_xmeq_Ma_times_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_H_xmeq_pMa_times_pH ( QLA_D3_HalfFermion *restrict r, 
QLA_D3_ColorMatrix *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_D_meq_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_D_vmeq_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_xmeq_Ma_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_vmeq_pMa_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_D_vmeq_Ma_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_vmeq_pMa_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_D_xmeq_pMa_times_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_D_xmeq_Ma_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_D_xmeq_pMa_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); # 3338 "./qla-1.7.1/qla_d3.h" void QLA_D3_V_eq_nM_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int nd); void QLA_D3_V_veq_nM_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int n, int nd); void QLA_D3_V_xeq_nM_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n, int nd); void QLA_D3_V_veq_npM_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict *b, int n, int 
nd); void QLA_D3_V_veq_nM_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict **b, int n, int nd); void QLA_D3_V_veq_npM_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict **b, int n, int nd); void QLA_D3_V_xeq_npM_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict *b, int *index, int n, int nd); void QLA_D3_V_xeq_nM_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict **b, int *index, int n, int nd); void QLA_D3_V_xeq_npM_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict **b, int *index, int n, int nd); void QLA_D3_V_eq_nMa_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int nd); void QLA_D3_V_veq_nMa_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int n, int nd); void QLA_D3_V_xeq_nMa_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n, int nd); void QLA_D3_V_veq_npMa_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict *b, int n, int nd); void QLA_D3_V_veq_nMa_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict **b, int n, int nd); void QLA_D3_V_veq_npMa_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict **b, int n, int nd); void QLA_D3_V_xeq_npMa_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict *b, int *index, int n, int nd); void QLA_D3_V_xeq_nMa_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict **b, int *index, int n, int 
nd); void QLA_D3_V_xeq_npMa_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict **b, int *index, int n, int nd); void QLA_D3_V_peq_nM_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int nd); void QLA_D3_V_vpeq_nM_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int n, int nd); void QLA_D3_V_xpeq_nM_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n, int nd); void QLA_D3_V_vpeq_npM_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict *b, int n, int nd); void QLA_D3_V_vpeq_nM_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict **b, int n, int nd); void QLA_D3_V_vpeq_npM_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict **b, int n, int nd); void QLA_D3_V_xpeq_npM_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict *b, int *index, int n, int nd); void QLA_D3_V_xpeq_nM_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict **b, int *index, int n, int nd); void QLA_D3_V_xpeq_npM_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict **b, int *index, int n, int nd); void QLA_D3_V_peq_nMa_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int nd); void QLA_D3_V_vpeq_nMa_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int n, int nd); void QLA_D3_V_xpeq_nMa_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n, int nd); void 
QLA_D3_V_vpeq_npMa_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict *b, int n, int nd); void QLA_D3_V_vpeq_nMa_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict **b, int n, int nd); void QLA_D3_V_vpeq_npMa_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict **b, int n, int nd); void QLA_D3_V_xpeq_npMa_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict *b, int *index, int n, int nd); void QLA_D3_V_xpeq_nMa_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict **b, int *index, int n, int nd); void QLA_D3_V_xpeq_npMa_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict **b, int *index, int n, int nd); void QLA_D3_V_eqm_nM_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int nd); void QLA_D3_V_veqm_nM_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int n, int nd); void QLA_D3_V_xeqm_nM_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n, int nd); void QLA_D3_V_veqm_npM_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict *b, int n, int nd); void QLA_D3_V_veqm_nM_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict **b, int n, int nd); void QLA_D3_V_veqm_npM_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict **b, int n, int nd); void QLA_D3_V_xeqm_npM_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict *b, int *index, int n, int nd); void 
QLA_D3_V_xeqm_nM_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict **b, int *index, int n, int nd); void QLA_D3_V_xeqm_npM_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict **b, int *index, int n, int nd); void QLA_D3_V_eqm_nMa_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int nd); void QLA_D3_V_veqm_nMa_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int n, int nd); void QLA_D3_V_xeqm_nMa_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n, int nd); void QLA_D3_V_veqm_npMa_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict *b, int n, int nd); void QLA_D3_V_veqm_nMa_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict **b, int n, int nd); void QLA_D3_V_veqm_npMa_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict **b, int n, int nd); void QLA_D3_V_xeqm_npMa_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict *b, int *index, int n, int nd); void QLA_D3_V_xeqm_nMa_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict **b, int *index, int n, int nd); void QLA_D3_V_xeqm_npMa_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict **b, int *index, int n, int nd); void QLA_D3_V_meq_nM_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int nd); void QLA_D3_V_vmeq_nM_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int n, int nd); void 
QLA_D3_V_xmeq_nM_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n, int nd); void QLA_D3_V_vmeq_npM_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict *b, int n, int nd); void QLA_D3_V_vmeq_nM_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict **b, int n, int nd); void QLA_D3_V_vmeq_npM_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict **b, int n, int nd); void QLA_D3_V_xmeq_npM_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict *b, int *index, int n, int nd); void QLA_D3_V_xmeq_nM_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict **b, int *index, int n, int nd); void QLA_D3_V_xmeq_npM_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict **b, int *index, int n, int nd); void QLA_D3_V_meq_nMa_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int nd); void QLA_D3_V_vmeq_nMa_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int n, int nd); void QLA_D3_V_xmeq_nMa_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n, int nd); void QLA_D3_V_vmeq_npMa_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict *b, int n, int nd); void QLA_D3_V_vmeq_nMa_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict **b, int n, int nd); void QLA_D3_V_vmeq_npMa_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict **b, int n, int nd); void 
QLA_D3_V_xmeq_npMa_times_nV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict *b, int *index, int n, int nd); void QLA_D3_V_xmeq_nMa_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorVector *restrict **b, int *index, int n, int nd); void QLA_D3_V_xmeq_npMa_times_npV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict **a, QLA_D3_ColorVector *restrict **b, int *index, int n, int nd); # 3447 "./qla-1.7.1/qla_d3.h" void QLA_D3_P_eq_M_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veq_M_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeq_M_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veq_pM_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_veq_M_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_veq_pM_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeq_pM_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xeq_M_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xeq_pM_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_eq_M_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b); void 
QLA_D3_P_veq_M_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeq_M_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veq_pM_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_veq_M_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_veq_pM_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeq_pM_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xeq_M_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xeq_pM_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_eq_Ma_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veq_Ma_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeq_Ma_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veq_pMa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_veq_Ma_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_veq_pMa_times_pP ( QLA_D3_DiracPropagator *restrict r, 
QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeq_pMa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xeq_Ma_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xeq_pMa_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_eq_Ma_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veq_Ma_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeq_Ma_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veq_pMa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_veq_Ma_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_veq_pMa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeq_pMa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xeq_Ma_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xeq_pMa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_peq_M_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict 
a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vpeq_M_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xpeq_M_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vpeq_pM_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_vpeq_M_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_vpeq_pM_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xpeq_pM_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xpeq_M_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xpeq_pM_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_peq_M_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vpeq_M_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xpeq_M_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vpeq_pM_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_vpeq_M_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void 
QLA_D3_P_vpeq_pM_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xpeq_pM_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xpeq_M_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xpeq_pM_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_peq_Ma_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vpeq_Ma_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xpeq_Ma_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vpeq_pMa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_vpeq_Ma_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_vpeq_pMa_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xpeq_pMa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xpeq_Ma_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xpeq_pMa_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void 
QLA_D3_P_peq_Ma_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vpeq_Ma_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xpeq_Ma_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vpeq_pMa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_vpeq_Ma_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_vpeq_pMa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xpeq_pMa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xpeq_Ma_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xpeq_pMa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_eqm_M_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veqm_M_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeqm_M_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veqm_pM_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_veqm_M_times_pP ( QLA_D3_DiracPropagator *restrict 
r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_veqm_pM_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeqm_pM_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xeqm_M_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xeqm_pM_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_eqm_M_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veqm_M_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeqm_M_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veqm_pM_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_veqm_M_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_veqm_pM_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeqm_pM_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xeqm_M_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xeqm_pM_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, 
QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_eqm_Ma_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veqm_Ma_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeqm_Ma_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veqm_pMa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_veqm_Ma_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_veqm_pMa_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeqm_pMa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xeqm_Ma_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xeqm_pMa_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_eqm_Ma_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_veqm_Ma_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xeqm_Ma_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_veqm_pMa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void 
QLA_D3_P_veqm_Ma_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_veqm_pMa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xeqm_pMa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xeqm_Ma_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xeqm_pMa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_meq_M_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vmeq_M_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xmeq_M_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vmeq_pM_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_vmeq_M_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_vmeq_pM_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xmeq_pM_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xmeq_M_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xmeq_pM_times_pP ( 
QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_meq_M_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vmeq_M_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xmeq_M_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vmeq_pM_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_vmeq_M_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_vmeq_pM_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xmeq_pM_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xmeq_M_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xmeq_pM_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_meq_Ma_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vmeq_Ma_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xmeq_Ma_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vmeq_pMa_times_P ( QLA_D3_DiracPropagator *restrict r, 
QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_vmeq_Ma_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_vmeq_pMa_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xmeq_pMa_times_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xmeq_Ma_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xmeq_pMa_times_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_meq_Ma_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_P_vmeq_Ma_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_xmeq_Ma_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_vmeq_pMa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_P_vmeq_Ma_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_vmeq_pMa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_P_xmeq_pMa_times_Pa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_P_xmeq_Ma_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict a, 
QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_xmeq_pMa_times_pPa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_P_eq_P_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_P_veq_P_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_xeq_P_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_veq_pP_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_veq_P_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_veq_pP_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_xeq_pP_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_xeq_P_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_xeq_pP_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_eq_P_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_P_veq_P_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_xeq_P_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void 
QLA_D3_P_veq_pP_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_veq_P_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_veq_pP_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_xeq_pP_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_xeq_P_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_xeq_pP_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_eq_Pa_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_P_veq_Pa_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_xeq_Pa_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_veq_pPa_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_veq_Pa_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_veq_pPa_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_xeq_pPa_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_xeq_Pa_times_pM ( QLA_D3_DiracPropagator *restrict r, 
QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_xeq_pPa_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_eq_Pa_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_P_veq_Pa_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_xeq_Pa_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_veq_pPa_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_veq_Pa_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_veq_pPa_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_xeq_pPa_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_xeq_Pa_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_xeq_pPa_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_peq_P_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_P_vpeq_P_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_xpeq_P_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix 
*restrict b, int *index, int n); void QLA_D3_P_vpeq_pP_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_vpeq_P_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_vpeq_pP_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_xpeq_pP_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_xpeq_P_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_xpeq_pP_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_peq_P_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_P_vpeq_P_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_xpeq_P_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_vpeq_pP_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_vpeq_P_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_vpeq_pP_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_xpeq_pP_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void 
QLA_D3_P_xpeq_P_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_xpeq_pP_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_peq_Pa_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_P_vpeq_Pa_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_xpeq_Pa_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_vpeq_pPa_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_vpeq_Pa_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_vpeq_pPa_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_xpeq_pPa_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_xpeq_Pa_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_xpeq_pPa_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_peq_Pa_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_P_vpeq_Pa_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_xpeq_Pa_times_Ma ( 
QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_vpeq_pPa_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_vpeq_Pa_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_vpeq_pPa_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_xpeq_pPa_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_xpeq_Pa_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_xpeq_pPa_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_eqm_P_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_P_veqm_P_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_xeqm_P_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_veqm_pP_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_veqm_P_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_veqm_pP_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_xeqm_pP_times_M ( QLA_D3_DiracPropagator *restrict r, 
QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_xeqm_P_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_xeqm_pP_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_eqm_P_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_P_veqm_P_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_xeqm_P_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_veqm_pP_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_veqm_P_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_veqm_pP_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_xeqm_pP_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_xeqm_P_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_xeqm_pP_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_eqm_Pa_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_P_veqm_Pa_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, 
QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_xeqm_Pa_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_veqm_pPa_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_veqm_Pa_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_veqm_pPa_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_xeqm_pPa_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_xeqm_Pa_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_xeqm_pPa_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_eqm_Pa_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_P_veqm_Pa_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_xeqm_Pa_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_veqm_pPa_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_veqm_Pa_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_veqm_pPa_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void 
QLA_D3_P_xeqm_pPa_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_xeqm_Pa_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_xeqm_pPa_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_meq_P_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_P_vmeq_P_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_xmeq_P_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_vmeq_pP_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_vmeq_P_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_vmeq_pP_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_xmeq_pP_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_xmeq_P_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_xmeq_pP_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_meq_P_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_P_vmeq_P_times_Ma ( 
QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_xmeq_P_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_vmeq_pP_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_vmeq_P_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_vmeq_pP_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_xmeq_pP_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_xmeq_P_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_xmeq_pP_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_meq_Pa_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_P_vmeq_Pa_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_xmeq_Pa_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_vmeq_pPa_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_vmeq_Pa_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_vmeq_pPa_times_pM ( QLA_D3_DiracPropagator *restrict r, 
QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_xmeq_pPa_times_M ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_xmeq_Pa_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_xmeq_pPa_times_pM ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_meq_Pa_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_P_vmeq_Pa_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_xmeq_Pa_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_vmeq_pPa_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_P_vmeq_Pa_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_vmeq_pPa_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_P_xmeq_pPa_times_Ma ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_P_xmeq_Pa_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_P_xmeq_pPa_times_pMa ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); # 3868 "./qla-1.7.1/qla_d3.h" void QLA_D3_M_eq_V_times_Va ( QLA_D3_ColorMatrix 
*restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_M_veq_V_times_Va ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_M_xeq_V_times_Va ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_M_veq_pV_times_Va ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_M_veq_V_times_pVa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_M_veq_pV_times_pVa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_M_xeq_pV_times_Va ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_M_xeq_V_times_pVa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_M_xeq_pV_times_pVa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_M_peq_V_times_Va ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_M_vpeq_V_times_Va ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_M_xpeq_V_times_Va ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_M_vpeq_pV_times_Va ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_M_vpeq_V_times_pVa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_M_vpeq_pV_times_pVa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector 
*restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_M_xpeq_pV_times_Va ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_M_xpeq_V_times_pVa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_M_xpeq_pV_times_pVa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_M_eqm_V_times_Va ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_M_veqm_V_times_Va ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_M_xeqm_V_times_Va ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_M_veqm_pV_times_Va ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_M_veqm_V_times_pVa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_M_veqm_pV_times_pVa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_M_xeqm_pV_times_Va ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_M_xeqm_V_times_pVa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_M_xeqm_pV_times_pVa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_M_meq_V_times_Va ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_M_vmeq_V_times_Va ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector 
*restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_M_xmeq_V_times_Va ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_M_vmeq_pV_times_Va ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_M_vmeq_V_times_pVa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_M_vmeq_pV_times_pVa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_M_xmeq_pV_times_Va ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_M_xmeq_V_times_pVa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_M_xmeq_pV_times_pVa ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); # 3925 "./qla-1.7.1/qla_d3.h" void QLA_D3_C_eq_M_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_C_veq_M_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_C_xeq_M_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_C_veq_pM_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_C_veq_M_dot_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_C_veq_pM_dot_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_C_xeq_pM_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int 
*index, int n); void QLA_D3_C_xeq_M_dot_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_C_xeq_pM_dot_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_C_eq_H_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_C_veq_H_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_C_xeq_H_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_C_veq_pH_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_C_veq_H_dot_pH ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_C_veq_pH_dot_pH ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_C_xeq_pH_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_C_xeq_H_dot_pH ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_C_xeq_pH_dot_pH ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_C_eq_D_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_C_veq_D_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_C_xeq_D_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_C_veq_pD_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion 
*restrict b, int n); void QLA_D3_C_veq_D_dot_pD ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_C_veq_pD_dot_pD ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_C_xeq_pD_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_C_xeq_D_dot_pD ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_C_xeq_pD_dot_pD ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_C_eq_V_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_C_veq_V_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_C_xeq_V_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_C_veq_pV_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_C_veq_V_dot_pV ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_C_veq_pV_dot_pV ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_C_xeq_pV_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_C_xeq_V_dot_pV ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_C_xeq_pV_dot_pV ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_C_eq_P_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator 
*restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_C_veq_P_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_C_xeq_P_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_C_veq_pP_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_C_veq_P_dot_pP ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_C_veq_pP_dot_pP ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_C_xeq_pP_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_C_xeq_P_dot_pP ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_C_xeq_pP_dot_pP ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_C_peq_M_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_C_vpeq_M_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_C_xpeq_M_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_C_vpeq_pM_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_C_vpeq_M_dot_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_C_vpeq_pM_dot_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void 
QLA_D3_C_xpeq_pM_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_C_xpeq_M_dot_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_C_xpeq_pM_dot_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_C_peq_H_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_C_vpeq_H_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_C_xpeq_H_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_C_vpeq_pH_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_C_vpeq_H_dot_pH ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_C_vpeq_pH_dot_pH ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_C_xpeq_pH_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_C_xpeq_H_dot_pH ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_C_xpeq_pH_dot_pH ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_C_peq_D_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_C_vpeq_D_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_C_xpeq_D_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion 
*restrict b, int *index, int n); void QLA_D3_C_vpeq_pD_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_C_vpeq_D_dot_pD ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_C_vpeq_pD_dot_pD ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_C_xpeq_pD_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_C_xpeq_D_dot_pD ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_C_xpeq_pD_dot_pD ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_C_peq_V_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_C_vpeq_V_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_C_xpeq_V_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_C_vpeq_pV_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_C_vpeq_V_dot_pV ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_C_vpeq_pV_dot_pV ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_C_xpeq_pV_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_C_xpeq_V_dot_pV ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_C_xpeq_pV_dot_pV ( QLA_D_Complex *restrict r, 
QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_C_peq_P_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_C_vpeq_P_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_C_xpeq_P_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_C_vpeq_pP_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_C_vpeq_P_dot_pP ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_C_vpeq_pP_dot_pP ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_C_xpeq_pP_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_C_xpeq_P_dot_pP ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_C_xpeq_pP_dot_pP ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_C_eqm_M_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_C_veqm_M_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_C_xeqm_M_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_C_veqm_pM_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_C_veqm_M_dot_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix 
*restrict *b, int n); void QLA_D3_C_veqm_pM_dot_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_C_xeqm_pM_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_C_xeqm_M_dot_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_C_xeqm_pM_dot_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_C_eqm_H_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_C_veqm_H_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_C_xeqm_H_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_C_veqm_pH_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_C_veqm_H_dot_pH ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_C_veqm_pH_dot_pH ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_C_xeqm_pH_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_C_xeqm_H_dot_pH ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_C_xeqm_pH_dot_pH ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_C_eqm_D_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_C_veqm_D_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict 
a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_C_xeqm_D_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_C_veqm_pD_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_C_veqm_D_dot_pD ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_C_veqm_pD_dot_pD ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_C_xeqm_pD_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_C_xeqm_D_dot_pD ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_C_xeqm_pD_dot_pD ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_C_eqm_V_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_C_veqm_V_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_C_xeqm_V_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_C_veqm_pV_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_C_veqm_V_dot_pV ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_C_veqm_pV_dot_pV ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_C_xeqm_pV_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_C_xeqm_V_dot_pV ( QLA_D_Complex *restrict r, 
QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_C_xeqm_pV_dot_pV ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_C_eqm_P_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_C_veqm_P_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_C_xeqm_P_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_C_veqm_pP_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_C_veqm_P_dot_pP ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_C_veqm_pP_dot_pP ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_C_xeqm_pP_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_C_xeqm_P_dot_pP ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_C_xeqm_pP_dot_pP ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_C_meq_M_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_C_vmeq_M_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_C_xmeq_M_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_C_vmeq_pM_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, 
QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_C_vmeq_M_dot_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_C_vmeq_pM_dot_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_C_xmeq_pM_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_C_xmeq_M_dot_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_C_xmeq_pM_dot_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_C_meq_H_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_C_vmeq_H_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_C_xmeq_H_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_C_vmeq_pH_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_C_vmeq_H_dot_pH ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_C_vmeq_pH_dot_pH ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_C_xmeq_pH_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_C_xmeq_H_dot_pH ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_C_xmeq_pH_dot_pH ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_C_meq_D_dot_D ( QLA_D_Complex *restrict r, 
QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_C_vmeq_D_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_C_xmeq_D_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_C_vmeq_pD_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_C_vmeq_D_dot_pD ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_C_vmeq_pD_dot_pD ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_C_xmeq_pD_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_C_xmeq_D_dot_pD ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_C_xmeq_pD_dot_pD ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_C_meq_V_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_C_vmeq_V_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_C_xmeq_V_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_C_vmeq_pV_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_C_vmeq_V_dot_pV ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_C_vmeq_pV_dot_pV ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_C_xmeq_pV_dot_V ( QLA_D_Complex 
*restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_C_xmeq_V_dot_pV ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_C_xmeq_pV_dot_pV ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_C_meq_P_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_C_vmeq_P_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_C_xmeq_P_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_C_vmeq_pP_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_C_vmeq_P_dot_pP ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_C_vmeq_pP_dot_pP ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_C_xmeq_pP_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_C_xmeq_P_dot_pP ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_C_xmeq_pP_dot_pP ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_R_eq_re_M_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_R_veq_re_M_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_R_xeq_re_M_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, 
QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_R_veq_re_pM_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_R_veq_re_M_dot_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_R_veq_re_pM_dot_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_R_xeq_re_pM_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_R_xeq_re_M_dot_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_R_xeq_re_pM_dot_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_R_eq_re_H_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_R_veq_re_H_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_R_xeq_re_H_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_R_veq_re_pH_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_R_veq_re_H_dot_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_R_veq_re_pH_dot_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_R_xeq_re_pH_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_R_xeq_re_H_dot_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_R_xeq_re_pH_dot_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion 
*restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_R_eq_re_D_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_R_veq_re_D_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_R_xeq_re_D_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_R_veq_re_pD_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_R_veq_re_D_dot_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_R_veq_re_pD_dot_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_R_xeq_re_pD_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_R_xeq_re_D_dot_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_R_xeq_re_pD_dot_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_R_eq_re_V_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_R_veq_re_V_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_R_xeq_re_V_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_R_veq_re_pV_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_R_veq_re_V_dot_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_R_veq_re_pV_dot_pV ( QLA_D_Real *restrict r, 
QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_R_xeq_re_pV_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_R_xeq_re_V_dot_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_R_xeq_re_pV_dot_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_R_eq_re_P_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_R_veq_re_P_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_R_xeq_re_P_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_R_veq_re_pP_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_R_veq_re_P_dot_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_R_veq_re_pP_dot_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_R_xeq_re_pP_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_R_xeq_re_P_dot_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_R_xeq_re_pP_dot_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_R_peq_re_M_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_R_vpeq_re_M_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix 
*restrict b, int n); void QLA_D3_R_xpeq_re_M_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_R_vpeq_re_pM_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_R_vpeq_re_M_dot_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_R_vpeq_re_pM_dot_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_R_xpeq_re_pM_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_R_xpeq_re_M_dot_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_R_xpeq_re_pM_dot_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_R_peq_re_H_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_R_vpeq_re_H_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_R_xpeq_re_H_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_R_vpeq_re_pH_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_R_vpeq_re_H_dot_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_R_vpeq_re_pH_dot_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_R_xpeq_re_pH_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_R_xpeq_re_H_dot_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, 
QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_R_xpeq_re_pH_dot_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_R_peq_re_D_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_R_vpeq_re_D_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_R_xpeq_re_D_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_R_vpeq_re_pD_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_R_vpeq_re_D_dot_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_R_vpeq_re_pD_dot_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_R_xpeq_re_pD_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_R_xpeq_re_D_dot_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_R_xpeq_re_pD_dot_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_R_peq_re_V_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_R_vpeq_re_V_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_R_xpeq_re_V_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_R_vpeq_re_pV_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_R_vpeq_re_V_dot_pV ( QLA_D_Real *restrict r, 
QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_R_vpeq_re_pV_dot_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_R_xpeq_re_pV_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_R_xpeq_re_V_dot_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_R_xpeq_re_pV_dot_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_R_peq_re_P_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_R_vpeq_re_P_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_R_xpeq_re_P_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_R_vpeq_re_pP_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_R_vpeq_re_P_dot_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_R_vpeq_re_pP_dot_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_R_xpeq_re_pP_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_R_xpeq_re_P_dot_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_R_xpeq_re_pP_dot_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_R_eqm_re_M_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, 
QLA_D3_ColorMatrix *restrict b); void QLA_D3_R_veqm_re_M_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_R_xeqm_re_M_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_R_veqm_re_pM_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_R_veqm_re_M_dot_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_R_veqm_re_pM_dot_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_R_xeqm_re_pM_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_R_xeqm_re_M_dot_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_R_xeqm_re_pM_dot_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_R_eqm_re_H_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_R_veqm_re_H_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_R_xeqm_re_H_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_R_veqm_re_pH_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_R_veqm_re_H_dot_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_R_veqm_re_pH_dot_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_R_xeqm_re_pH_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, 
QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_R_xeqm_re_H_dot_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_R_xeqm_re_pH_dot_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_R_eqm_re_D_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_R_veqm_re_D_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_R_xeqm_re_D_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_R_veqm_re_pD_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_R_veqm_re_D_dot_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_R_veqm_re_pD_dot_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_R_xeqm_re_pD_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_R_xeqm_re_D_dot_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_R_xeqm_re_pD_dot_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_R_eqm_re_V_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_R_veqm_re_V_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_R_xeqm_re_V_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_R_veqm_re_pV_dot_V ( QLA_D_Real 
*restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_R_veqm_re_V_dot_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_R_veqm_re_pV_dot_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_R_xeqm_re_pV_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_R_xeqm_re_V_dot_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_R_xeqm_re_pV_dot_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_R_eqm_re_P_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_R_veqm_re_P_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_R_xeqm_re_P_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_R_veqm_re_pP_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_R_veqm_re_P_dot_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_R_veqm_re_pP_dot_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_R_xeqm_re_pP_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_R_xeqm_re_P_dot_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_R_xeqm_re_pP_dot_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, 
QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_R_meq_re_M_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_R_vmeq_re_M_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_R_xmeq_re_M_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_R_vmeq_re_pM_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_R_vmeq_re_M_dot_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_R_vmeq_re_pM_dot_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_R_xmeq_re_pM_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_R_xmeq_re_M_dot_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_R_xmeq_re_pM_dot_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_R_meq_re_H_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_R_vmeq_re_H_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_R_xmeq_re_H_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_R_vmeq_re_pH_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_R_vmeq_re_H_dot_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_R_vmeq_re_pH_dot_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict 
*a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_R_xmeq_re_pH_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_R_xmeq_re_H_dot_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_R_xmeq_re_pH_dot_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_R_meq_re_D_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_R_vmeq_re_D_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_R_xmeq_re_D_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_R_vmeq_re_pD_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_R_vmeq_re_D_dot_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_R_vmeq_re_pD_dot_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_R_xmeq_re_pD_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_R_xmeq_re_D_dot_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_R_xmeq_re_pD_dot_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_R_meq_re_V_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_R_vmeq_re_V_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_R_xmeq_re_V_dot_V ( QLA_D_Real *restrict 
r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_R_vmeq_re_pV_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_R_vmeq_re_V_dot_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_R_vmeq_re_pV_dot_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_R_xmeq_re_pV_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_R_xmeq_re_V_dot_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_R_xmeq_re_pV_dot_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_R_meq_re_P_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_R_vmeq_re_P_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_R_xmeq_re_P_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_R_vmeq_re_pP_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_R_vmeq_re_P_dot_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_R_vmeq_re_pP_dot_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_R_xmeq_re_pP_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_R_xmeq_re_P_dot_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, 
QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_R_xmeq_re_pP_dot_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); # 4455 "./qla-1.7.1/qla_d3.h" void QLA_D3_V_eq_r_times_V_plus_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, QLA_D3_ColorVector *restrict c); void QLA_D3_V_veq_r_times_V_plus_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, QLA_D3_ColorVector *restrict c, int n); void QLA_D3_V_xeq_r_times_V_plus_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, QLA_D3_ColorVector *restrict c, int *index, int n); void QLA_D3_V_veq_r_times_pV_plus_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, QLA_D3_ColorVector *restrict c, int n); void QLA_D3_V_veq_r_times_V_plus_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, QLA_D3_ColorVector *restrict *c, int n); void QLA_D3_V_veq_r_times_pV_plus_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, QLA_D3_ColorVector *restrict *c, int n); void QLA_D3_V_xeq_r_times_pV_plus_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, QLA_D3_ColorVector *restrict c, int *index, int n); void QLA_D3_V_xeq_r_times_V_plus_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, QLA_D3_ColorVector *restrict *c, int *index, int n); void QLA_D3_V_xeq_r_times_pV_plus_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, QLA_D3_ColorVector *restrict *c, int *index, int n); void QLA_D3_V_eq_r_times_V_minus_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, QLA_D3_ColorVector *restrict c); void QLA_D3_V_veq_r_times_V_minus_V ( QLA_D3_ColorVector 
*restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, QLA_D3_ColorVector *restrict c, int n); void QLA_D3_V_xeq_r_times_V_minus_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, QLA_D3_ColorVector *restrict c, int *index, int n); void QLA_D3_V_veq_r_times_pV_minus_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, QLA_D3_ColorVector *restrict c, int n); void QLA_D3_V_veq_r_times_V_minus_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, QLA_D3_ColorVector *restrict *c, int n); void QLA_D3_V_veq_r_times_pV_minus_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, QLA_D3_ColorVector *restrict *c, int n); void QLA_D3_V_xeq_r_times_pV_minus_V ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, QLA_D3_ColorVector *restrict c, int *index, int n); void QLA_D3_V_xeq_r_times_V_minus_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict b, QLA_D3_ColorVector *restrict *c, int *index, int n); void QLA_D3_V_xeq_r_times_pV_minus_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorVector *restrict *b, QLA_D3_ColorVector *restrict *c, int *index, int n); void QLA_D3_H_eq_r_times_H_plus_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, QLA_D3_HalfFermion *restrict c); void QLA_D3_H_veq_r_times_H_plus_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, QLA_D3_HalfFermion *restrict c, int n); void QLA_D3_H_xeq_r_times_H_plus_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, QLA_D3_HalfFermion *restrict c, int *index, int n); void QLA_D3_H_veq_r_times_pH_plus_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict *b, QLA_D3_HalfFermion *restrict c, int 
n); void QLA_D3_H_veq_r_times_H_plus_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, QLA_D3_HalfFermion *restrict *c, int n); void QLA_D3_H_veq_r_times_pH_plus_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict *b, QLA_D3_HalfFermion *restrict *c, int n); void QLA_D3_H_xeq_r_times_pH_plus_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict *b, QLA_D3_HalfFermion *restrict c, int *index, int n); void QLA_D3_H_xeq_r_times_H_plus_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, QLA_D3_HalfFermion *restrict *c, int *index, int n); void QLA_D3_H_xeq_r_times_pH_plus_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict *b, QLA_D3_HalfFermion *restrict *c, int *index, int n); void QLA_D3_H_eq_r_times_H_minus_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, QLA_D3_HalfFermion *restrict c); void QLA_D3_H_veq_r_times_H_minus_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, QLA_D3_HalfFermion *restrict c, int n); void QLA_D3_H_xeq_r_times_H_minus_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, QLA_D3_HalfFermion *restrict c, int *index, int n); void QLA_D3_H_veq_r_times_pH_minus_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict *b, QLA_D3_HalfFermion *restrict c, int n); void QLA_D3_H_veq_r_times_H_minus_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, QLA_D3_HalfFermion *restrict *c, int n); void QLA_D3_H_veq_r_times_pH_minus_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict *b, QLA_D3_HalfFermion *restrict *c, int n); void QLA_D3_H_xeq_r_times_pH_minus_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, 
QLA_D3_HalfFermion *restrict *b, QLA_D3_HalfFermion *restrict c, int *index, int n); void QLA_D3_H_xeq_r_times_H_minus_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict b, QLA_D3_HalfFermion *restrict *c, int *index, int n); void QLA_D3_H_xeq_r_times_pH_minus_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_HalfFermion *restrict *b, QLA_D3_HalfFermion *restrict *c, int *index, int n); void QLA_D3_D_eq_r_times_D_plus_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, QLA_D3_DiracFermion *restrict c); void QLA_D3_D_veq_r_times_D_plus_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, QLA_D3_DiracFermion *restrict c, int n); void QLA_D3_D_xeq_r_times_D_plus_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, QLA_D3_DiracFermion *restrict c, int *index, int n); void QLA_D3_D_veq_r_times_pD_plus_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict *b, QLA_D3_DiracFermion *restrict c, int n); void QLA_D3_D_veq_r_times_D_plus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, QLA_D3_DiracFermion *restrict *c, int n); void QLA_D3_D_veq_r_times_pD_plus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict *b, QLA_D3_DiracFermion *restrict *c, int n); void QLA_D3_D_xeq_r_times_pD_plus_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict *b, QLA_D3_DiracFermion *restrict c, int *index, int n); void QLA_D3_D_xeq_r_times_D_plus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, QLA_D3_DiracFermion *restrict *c, int *index, int n); void QLA_D3_D_xeq_r_times_pD_plus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict *b, QLA_D3_DiracFermion *restrict *c, int 
*index, int n); void QLA_D3_D_eq_r_times_D_minus_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, QLA_D3_DiracFermion *restrict c); void QLA_D3_D_veq_r_times_D_minus_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, QLA_D3_DiracFermion *restrict c, int n); void QLA_D3_D_xeq_r_times_D_minus_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, QLA_D3_DiracFermion *restrict c, int *index, int n); void QLA_D3_D_veq_r_times_pD_minus_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict *b, QLA_D3_DiracFermion *restrict c, int n); void QLA_D3_D_veq_r_times_D_minus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, QLA_D3_DiracFermion *restrict *c, int n); void QLA_D3_D_veq_r_times_pD_minus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict *b, QLA_D3_DiracFermion *restrict *c, int n); void QLA_D3_D_xeq_r_times_pD_minus_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict *b, QLA_D3_DiracFermion *restrict c, int *index, int n); void QLA_D3_D_xeq_r_times_D_minus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict b, QLA_D3_DiracFermion *restrict *c, int *index, int n); void QLA_D3_D_xeq_r_times_pD_minus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracFermion *restrict *b, QLA_D3_DiracFermion *restrict *c, int *index, int n); void QLA_D3_M_eq_r_times_M_plus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, QLA_D3_ColorMatrix *restrict c); void QLA_D3_M_veq_r_times_M_plus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, QLA_D3_ColorMatrix *restrict c, int n); void QLA_D3_M_xeq_r_times_M_plus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real 
*restrict a, QLA_D3_ColorMatrix *restrict b, QLA_D3_ColorMatrix *restrict c, int *index, int n); void QLA_D3_M_veq_r_times_pM_plus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, QLA_D3_ColorMatrix *restrict c, int n); void QLA_D3_M_veq_r_times_M_plus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, QLA_D3_ColorMatrix *restrict *c, int n); void QLA_D3_M_veq_r_times_pM_plus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, QLA_D3_ColorMatrix *restrict *c, int n); void QLA_D3_M_xeq_r_times_pM_plus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, QLA_D3_ColorMatrix *restrict c, int *index, int n); void QLA_D3_M_xeq_r_times_M_plus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, QLA_D3_ColorMatrix *restrict *c, int *index, int n); void QLA_D3_M_xeq_r_times_pM_plus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, QLA_D3_ColorMatrix *restrict *c, int *index, int n); void QLA_D3_M_eq_r_times_M_minus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, QLA_D3_ColorMatrix *restrict c); void QLA_D3_M_veq_r_times_M_minus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, QLA_D3_ColorMatrix *restrict c, int n); void QLA_D3_M_xeq_r_times_M_minus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, QLA_D3_ColorMatrix *restrict c, int *index, int n); void QLA_D3_M_veq_r_times_pM_minus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, QLA_D3_ColorMatrix *restrict c, int n); void QLA_D3_M_veq_r_times_M_minus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, QLA_D3_ColorMatrix *restrict *c, int n); void 
QLA_D3_M_veq_r_times_pM_minus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, QLA_D3_ColorMatrix *restrict *c, int n); void QLA_D3_M_xeq_r_times_pM_minus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, QLA_D3_ColorMatrix *restrict c, int *index, int n); void QLA_D3_M_xeq_r_times_M_minus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict b, QLA_D3_ColorMatrix *restrict *c, int *index, int n); void QLA_D3_M_xeq_r_times_pM_minus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Real *restrict a, QLA_D3_ColorMatrix *restrict *b, QLA_D3_ColorMatrix *restrict *c, int *index, int n); void QLA_D3_P_eq_r_times_P_plus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, QLA_D3_DiracPropagator *restrict c); void QLA_D3_P_veq_r_times_P_plus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, QLA_D3_DiracPropagator *restrict c, int n); void QLA_D3_P_xeq_r_times_P_plus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, QLA_D3_DiracPropagator *restrict c, int *index, int n); void QLA_D3_P_veq_r_times_pP_plus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, QLA_D3_DiracPropagator *restrict c, int n); void QLA_D3_P_veq_r_times_P_plus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, QLA_D3_DiracPropagator *restrict *c, int n); void QLA_D3_P_veq_r_times_pP_plus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, QLA_D3_DiracPropagator *restrict *c, int n); void QLA_D3_P_xeq_r_times_pP_plus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, QLA_D3_DiracPropagator *restrict c, int *index, int n); void 
QLA_D3_P_xeq_r_times_P_plus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, QLA_D3_DiracPropagator *restrict *c, int *index, int n); void QLA_D3_P_xeq_r_times_pP_plus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, QLA_D3_DiracPropagator *restrict *c, int *index, int n); void QLA_D3_P_eq_r_times_P_minus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, QLA_D3_DiracPropagator *restrict c); void QLA_D3_P_veq_r_times_P_minus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, QLA_D3_DiracPropagator *restrict c, int n); void QLA_D3_P_xeq_r_times_P_minus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, QLA_D3_DiracPropagator *restrict c, int *index, int n); void QLA_D3_P_veq_r_times_pP_minus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, QLA_D3_DiracPropagator *restrict c, int n); void QLA_D3_P_veq_r_times_P_minus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, QLA_D3_DiracPropagator *restrict *c, int n); void QLA_D3_P_veq_r_times_pP_minus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, QLA_D3_DiracPropagator *restrict *c, int n); void QLA_D3_P_xeq_r_times_pP_minus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, QLA_D3_DiracPropagator *restrict c, int *index, int n); void QLA_D3_P_xeq_r_times_P_minus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict b, QLA_D3_DiracPropagator *restrict *c, int *index, int n); void QLA_D3_P_xeq_r_times_pP_minus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Real *restrict a, QLA_D3_DiracPropagator *restrict *b, 
QLA_D3_DiracPropagator *restrict *c, int *index, int n); # 4590 "./qla-1.7.1/qla_d3.h" void QLA_D3_V_eq_c_times_V_plus_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, QLA_D3_ColorVector *restrict c); void QLA_D3_V_veq_c_times_V_plus_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, QLA_D3_ColorVector *restrict c, int n); void QLA_D3_V_xeq_c_times_V_plus_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, QLA_D3_ColorVector *restrict c, int *index, int n); void QLA_D3_V_veq_c_times_pV_plus_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, QLA_D3_ColorVector *restrict c, int n); void QLA_D3_V_veq_c_times_V_plus_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, QLA_D3_ColorVector *restrict *c, int n); void QLA_D3_V_veq_c_times_pV_plus_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, QLA_D3_ColorVector *restrict *c, int n); void QLA_D3_V_xeq_c_times_pV_plus_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, QLA_D3_ColorVector *restrict c, int *index, int n); void QLA_D3_V_xeq_c_times_V_plus_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, QLA_D3_ColorVector *restrict *c, int *index, int n); void QLA_D3_V_xeq_c_times_pV_plus_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, QLA_D3_ColorVector *restrict *c, int *index, int n); void QLA_D3_V_eq_c_times_V_minus_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, QLA_D3_ColorVector *restrict c); void QLA_D3_V_veq_c_times_V_minus_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, QLA_D3_ColorVector *restrict c, int n); void 
QLA_D3_V_xeq_c_times_V_minus_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, QLA_D3_ColorVector *restrict c, int *index, int n); void QLA_D3_V_veq_c_times_pV_minus_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, QLA_D3_ColorVector *restrict c, int n); void QLA_D3_V_veq_c_times_V_minus_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, QLA_D3_ColorVector *restrict *c, int n); void QLA_D3_V_veq_c_times_pV_minus_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, QLA_D3_ColorVector *restrict *c, int n); void QLA_D3_V_xeq_c_times_pV_minus_V ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, QLA_D3_ColorVector *restrict c, int *index, int n); void QLA_D3_V_xeq_c_times_V_minus_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict b, QLA_D3_ColorVector *restrict *c, int *index, int n); void QLA_D3_V_xeq_c_times_pV_minus_pV ( QLA_D3_ColorVector *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorVector *restrict *b, QLA_D3_ColorVector *restrict *c, int *index, int n); void QLA_D3_H_eq_c_times_H_plus_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, QLA_D3_HalfFermion *restrict c); void QLA_D3_H_veq_c_times_H_plus_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, QLA_D3_HalfFermion *restrict c, int n); void QLA_D3_H_xeq_c_times_H_plus_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, QLA_D3_HalfFermion *restrict c, int *index, int n); void QLA_D3_H_veq_c_times_pH_plus_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, QLA_D3_HalfFermion *restrict c, int n); void QLA_D3_H_veq_c_times_H_plus_pH ( QLA_D3_HalfFermion *restrict r, 
QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, QLA_D3_HalfFermion *restrict *c, int n); void QLA_D3_H_veq_c_times_pH_plus_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, QLA_D3_HalfFermion *restrict *c, int n); void QLA_D3_H_xeq_c_times_pH_plus_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, QLA_D3_HalfFermion *restrict c, int *index, int n); void QLA_D3_H_xeq_c_times_H_plus_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, QLA_D3_HalfFermion *restrict *c, int *index, int n); void QLA_D3_H_xeq_c_times_pH_plus_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, QLA_D3_HalfFermion *restrict *c, int *index, int n); void QLA_D3_H_eq_c_times_H_minus_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, QLA_D3_HalfFermion *restrict c); void QLA_D3_H_veq_c_times_H_minus_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, QLA_D3_HalfFermion *restrict c, int n); void QLA_D3_H_xeq_c_times_H_minus_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, QLA_D3_HalfFermion *restrict c, int *index, int n); void QLA_D3_H_veq_c_times_pH_minus_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, QLA_D3_HalfFermion *restrict c, int n); void QLA_D3_H_veq_c_times_H_minus_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, QLA_D3_HalfFermion *restrict *c, int n); void QLA_D3_H_veq_c_times_pH_minus_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, QLA_D3_HalfFermion *restrict *c, int n); void QLA_D3_H_xeq_c_times_pH_minus_H ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, QLA_D3_HalfFermion 
*restrict c, int *index, int n); void QLA_D3_H_xeq_c_times_H_minus_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict b, QLA_D3_HalfFermion *restrict *c, int *index, int n); void QLA_D3_H_xeq_c_times_pH_minus_pH ( QLA_D3_HalfFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_HalfFermion *restrict *b, QLA_D3_HalfFermion *restrict *c, int *index, int n); void QLA_D3_D_eq_c_times_D_plus_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, QLA_D3_DiracFermion *restrict c); void QLA_D3_D_veq_c_times_D_plus_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, QLA_D3_DiracFermion *restrict c, int n); void QLA_D3_D_xeq_c_times_D_plus_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, QLA_D3_DiracFermion *restrict c, int *index, int n); void QLA_D3_D_veq_c_times_pD_plus_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, QLA_D3_DiracFermion *restrict c, int n); void QLA_D3_D_veq_c_times_D_plus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, QLA_D3_DiracFermion *restrict *c, int n); void QLA_D3_D_veq_c_times_pD_plus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, QLA_D3_DiracFermion *restrict *c, int n); void QLA_D3_D_xeq_c_times_pD_plus_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, QLA_D3_DiracFermion *restrict c, int *index, int n); void QLA_D3_D_xeq_c_times_D_plus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, QLA_D3_DiracFermion *restrict *c, int *index, int n); void QLA_D3_D_xeq_c_times_pD_plus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, QLA_D3_DiracFermion *restrict *c, int *index, int n); 
void QLA_D3_D_eq_c_times_D_minus_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, QLA_D3_DiracFermion *restrict c); void QLA_D3_D_veq_c_times_D_minus_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, QLA_D3_DiracFermion *restrict c, int n); void QLA_D3_D_xeq_c_times_D_minus_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, QLA_D3_DiracFermion *restrict c, int *index, int n); void QLA_D3_D_veq_c_times_pD_minus_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, QLA_D3_DiracFermion *restrict c, int n); void QLA_D3_D_veq_c_times_D_minus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, QLA_D3_DiracFermion *restrict *c, int n); void QLA_D3_D_veq_c_times_pD_minus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, QLA_D3_DiracFermion *restrict *c, int n); void QLA_D3_D_xeq_c_times_pD_minus_D ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, QLA_D3_DiracFermion *restrict c, int *index, int n); void QLA_D3_D_xeq_c_times_D_minus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict b, QLA_D3_DiracFermion *restrict *c, int *index, int n); void QLA_D3_D_xeq_c_times_pD_minus_pD ( QLA_D3_DiracFermion *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracFermion *restrict *b, QLA_D3_DiracFermion *restrict *c, int *index, int n); void QLA_D3_M_eq_c_times_M_plus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, QLA_D3_ColorMatrix *restrict c); void QLA_D3_M_veq_c_times_M_plus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, QLA_D3_ColorMatrix *restrict c, int n); void QLA_D3_M_xeq_c_times_M_plus_M ( QLA_D3_ColorMatrix *restrict 
r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, QLA_D3_ColorMatrix *restrict c, int *index, int n); void QLA_D3_M_veq_c_times_pM_plus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, QLA_D3_ColorMatrix *restrict c, int n); void QLA_D3_M_veq_c_times_M_plus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, QLA_D3_ColorMatrix *restrict *c, int n); void QLA_D3_M_veq_c_times_pM_plus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, QLA_D3_ColorMatrix *restrict *c, int n); void QLA_D3_M_xeq_c_times_pM_plus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, QLA_D3_ColorMatrix *restrict c, int *index, int n); void QLA_D3_M_xeq_c_times_M_plus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, QLA_D3_ColorMatrix *restrict *c, int *index, int n); void QLA_D3_M_xeq_c_times_pM_plus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, QLA_D3_ColorMatrix *restrict *c, int *index, int n); void QLA_D3_M_eq_c_times_M_minus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, QLA_D3_ColorMatrix *restrict c); void QLA_D3_M_veq_c_times_M_minus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, QLA_D3_ColorMatrix *restrict c, int n); void QLA_D3_M_xeq_c_times_M_minus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, QLA_D3_ColorMatrix *restrict c, int *index, int n); void QLA_D3_M_veq_c_times_pM_minus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, QLA_D3_ColorMatrix *restrict c, int n); void QLA_D3_M_veq_c_times_M_minus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, 
QLA_D3_ColorMatrix *restrict *c, int n); void QLA_D3_M_veq_c_times_pM_minus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, QLA_D3_ColorMatrix *restrict *c, int n); void QLA_D3_M_xeq_c_times_pM_minus_M ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, QLA_D3_ColorMatrix *restrict c, int *index, int n); void QLA_D3_M_xeq_c_times_M_minus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict b, QLA_D3_ColorMatrix *restrict *c, int *index, int n); void QLA_D3_M_xeq_c_times_pM_minus_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, QLA_D3_ColorMatrix *restrict *b, QLA_D3_ColorMatrix *restrict *c, int *index, int n); void QLA_D3_P_eq_c_times_P_plus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, QLA_D3_DiracPropagator *restrict c); void QLA_D3_P_veq_c_times_P_plus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, QLA_D3_DiracPropagator *restrict c, int n); void QLA_D3_P_xeq_c_times_P_plus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, QLA_D3_DiracPropagator *restrict c, int *index, int n); void QLA_D3_P_veq_c_times_pP_plus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, QLA_D3_DiracPropagator *restrict c, int n); void QLA_D3_P_veq_c_times_P_plus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, QLA_D3_DiracPropagator *restrict *c, int n); void QLA_D3_P_veq_c_times_pP_plus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, QLA_D3_DiracPropagator *restrict *c, int n); void QLA_D3_P_xeq_c_times_pP_plus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, 
QLA_D3_DiracPropagator *restrict c, int *index, int n); void QLA_D3_P_xeq_c_times_P_plus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, QLA_D3_DiracPropagator *restrict *c, int *index, int n); void QLA_D3_P_xeq_c_times_pP_plus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, QLA_D3_DiracPropagator *restrict *c, int *index, int n); void QLA_D3_P_eq_c_times_P_minus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, QLA_D3_DiracPropagator *restrict c); void QLA_D3_P_veq_c_times_P_minus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, QLA_D3_DiracPropagator *restrict c, int n); void QLA_D3_P_xeq_c_times_P_minus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, QLA_D3_DiracPropagator *restrict c, int *index, int n); void QLA_D3_P_veq_c_times_pP_minus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, QLA_D3_DiracPropagator *restrict c, int n); void QLA_D3_P_veq_c_times_P_minus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, QLA_D3_DiracPropagator *restrict *c, int n); void QLA_D3_P_veq_c_times_pP_minus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, QLA_D3_DiracPropagator *restrict *c, int n); void QLA_D3_P_xeq_c_times_pP_minus_P ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, QLA_D3_DiracPropagator *restrict c, int *index, int n); void QLA_D3_P_xeq_c_times_P_minus_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict b, QLA_D3_DiracPropagator *restrict *c, int *index, int n); void QLA_D3_P_xeq_c_times_pP_minus_pP ( QLA_D3_DiracPropagator 
*restrict r, QLA_D_Complex *restrict a, QLA_D3_DiracPropagator *restrict *b, QLA_D3_DiracPropagator *restrict *c, int *index, int n); # 4730 "./qla-1.7.1/qla_d3.h" void QLA_D3_V_eq_V_mask_I ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, QLA_Int *restrict b); void QLA_D3_V_veq_V_mask_I ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, QLA_Int *restrict b, int n); void QLA_D3_V_xeq_V_mask_I ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, QLA_Int *restrict b, int *index, int n); void QLA_D3_V_veq_pV_mask_I ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, QLA_Int *restrict b, int n); void QLA_D3_V_veq_V_mask_pI ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, QLA_Int *restrict *b, int n); void QLA_D3_V_veq_pV_mask_pI ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, QLA_Int *restrict *b, int n); void QLA_D3_V_xeq_pV_mask_I ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, QLA_Int *restrict b, int *index, int n); void QLA_D3_V_xeq_V_mask_pI ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, QLA_Int *restrict *b, int *index, int n); void QLA_D3_V_xeq_pV_mask_pI ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, QLA_Int *restrict *b, int *index, int n); void QLA_D3_H_eq_H_mask_I ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, QLA_Int *restrict b); void QLA_D3_H_veq_H_mask_I ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, QLA_Int *restrict b, int n); void QLA_D3_H_xeq_H_mask_I ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, QLA_Int *restrict b, int *index, int n); void QLA_D3_H_veq_pH_mask_I ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_Int *restrict b, int n); void QLA_D3_H_veq_H_mask_pI ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, QLA_Int *restrict *b, int n); void QLA_D3_H_veq_pH_mask_pI ( QLA_D3_HalfFermion *restrict r, 
QLA_D3_HalfFermion *restrict *a, QLA_Int *restrict *b, int n); void QLA_D3_H_xeq_pH_mask_I ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_Int *restrict b, int *index, int n); void QLA_D3_H_xeq_H_mask_pI ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, QLA_Int *restrict *b, int *index, int n); void QLA_D3_H_xeq_pH_mask_pI ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_Int *restrict *b, int *index, int n); void QLA_D3_D_eq_D_mask_I ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, QLA_Int *restrict b); void QLA_D3_D_veq_D_mask_I ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, QLA_Int *restrict b, int n); void QLA_D3_D_xeq_D_mask_I ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, QLA_Int *restrict b, int *index, int n); void QLA_D3_D_veq_pD_mask_I ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_Int *restrict b, int n); void QLA_D3_D_veq_D_mask_pI ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, QLA_Int *restrict *b, int n); void QLA_D3_D_veq_pD_mask_pI ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_Int *restrict *b, int n); void QLA_D3_D_xeq_pD_mask_I ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_Int *restrict b, int *index, int n); void QLA_D3_D_xeq_D_mask_pI ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, QLA_Int *restrict *b, int *index, int n); void QLA_D3_D_xeq_pD_mask_pI ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_Int *restrict *b, int *index, int n); void QLA_D3_M_eq_M_mask_I ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_Int *restrict b); void QLA_D3_M_veq_M_mask_I ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_Int *restrict b, int n); void QLA_D3_M_xeq_M_mask_I ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_Int *restrict b, int 
*index, int n); void QLA_D3_M_veq_pM_mask_I ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_Int *restrict b, int n); void QLA_D3_M_veq_M_mask_pI ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_Int *restrict *b, int n); void QLA_D3_M_veq_pM_mask_pI ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_Int *restrict *b, int n); void QLA_D3_M_xeq_pM_mask_I ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_Int *restrict b, int *index, int n); void QLA_D3_M_xeq_M_mask_pI ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_Int *restrict *b, int *index, int n); void QLA_D3_M_xeq_pM_mask_pI ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_Int *restrict *b, int *index, int n); void QLA_D3_P_eq_P_mask_I ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_Int *restrict b); void QLA_D3_P_veq_P_mask_I ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_Int *restrict b, int n); void QLA_D3_P_xeq_P_mask_I ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_Int *restrict b, int *index, int n); void QLA_D3_P_veq_pP_mask_I ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_Int *restrict b, int n); void QLA_D3_P_veq_P_mask_pI ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_Int *restrict *b, int n); void QLA_D3_P_veq_pP_mask_pI ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_Int *restrict *b, int n); void QLA_D3_P_xeq_pP_mask_I ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_Int *restrict b, int *index, int n); void QLA_D3_P_xeq_P_mask_pI ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_Int *restrict *b, int *index, int n); void QLA_D3_P_xeq_pP_mask_pI ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_Int 
*restrict *b, int *index, int n); # 4805 "./qla-1.7.1/qla_d3.h" void QLA_D3_r_eq_norm2_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a); void QLA_D3_r_veq_norm2_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, int n); void QLA_D3_r_xeq_norm2_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, int *index, int n); void QLA_D3_r_veq_norm2_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, int n); void QLA_D3_r_xeq_norm2_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, int *index, int n); void QLA_D3_r_eq_norm2_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a); void QLA_D3_r_veq_norm2_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, int n); void QLA_D3_r_xeq_norm2_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, int *index, int n); void QLA_D3_r_veq_norm2_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, int n); void QLA_D3_r_xeq_norm2_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, int *index, int n); void QLA_D3_r_eq_norm2_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a); void QLA_D3_r_veq_norm2_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, int n); void QLA_D3_r_xeq_norm2_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, int *index, int n); void QLA_D3_r_veq_norm2_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, int n); void QLA_D3_r_xeq_norm2_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, int *index, int n); void QLA_D3_r_eq_norm2_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_r_veq_norm2_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_r_xeq_norm2_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_r_veq_norm2_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_r_xeq_norm2_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void 
QLA_D3_r_eq_norm2_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_r_veq_norm2_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_r_xeq_norm2_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_r_veq_norm2_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_r_xeq_norm2_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); # 4855 "./qla-1.7.1/qla_d3.h" void QLA_D3_c_eq_V_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_c_veq_V_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_c_xeq_V_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_c_veq_pV_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_c_veq_V_dot_pV ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_c_veq_pV_dot_pV ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_c_xeq_pV_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_c_xeq_V_dot_pV ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_c_xeq_pV_dot_pV ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_c_eq_H_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_c_veq_H_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_c_xeq_H_dot_H ( 
QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_c_veq_pH_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_c_veq_H_dot_pH ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_c_veq_pH_dot_pH ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_c_xeq_pH_dot_H ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_c_xeq_H_dot_pH ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_c_xeq_pH_dot_pH ( QLA_D_Complex *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_c_eq_D_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_c_veq_D_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_c_xeq_D_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_c_veq_pD_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_c_veq_D_dot_pD ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_c_veq_pD_dot_pD ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_c_xeq_pD_dot_D ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_c_xeq_D_dot_pD ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); 
void QLA_D3_c_xeq_pD_dot_pD ( QLA_D_Complex *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_c_eq_M_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_c_veq_M_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_c_xeq_M_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_c_veq_pM_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_c_veq_M_dot_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_c_veq_pM_dot_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_c_xeq_pM_dot_M ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_c_xeq_M_dot_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_c_xeq_pM_dot_pM ( QLA_D_Complex *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_c_eq_P_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_c_veq_P_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_c_xeq_P_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_c_veq_pP_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_c_veq_P_dot_pP ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, 
QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_c_veq_pP_dot_pP ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_c_xeq_pP_dot_P ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_c_xeq_P_dot_pP ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_c_xeq_pP_dot_pP ( QLA_D_Complex *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_r_eq_re_V_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b); void QLA_D3_r_veq_re_V_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_r_xeq_re_V_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_r_veq_re_pV_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int n); void QLA_D3_r_veq_re_V_dot_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_r_veq_re_pV_dot_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int n); void QLA_D3_r_xeq_re_pV_dot_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict b, int *index, int n); void QLA_D3_r_xeq_re_V_dot_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_r_xeq_re_pV_dot_pV ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict *a, QLA_D3_ColorVector *restrict *b, int *index, int n); void QLA_D3_r_eq_re_H_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b); void QLA_D3_r_veq_re_H_dot_H ( QLA_D_Real 
*restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_r_xeq_re_H_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_r_veq_re_pH_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int n); void QLA_D3_r_veq_re_H_dot_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_r_veq_re_pH_dot_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict *b, int n); void QLA_D3_r_xeq_re_pH_dot_H ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict b, int *index, int n); void QLA_D3_r_xeq_re_H_dot_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_r_xeq_re_pH_dot_pH ( QLA_D_Real *restrict r, QLA_D3_HalfFermion *restrict *a, QLA_D3_HalfFermion *restrict *b, int *index, int n); void QLA_D3_r_eq_re_D_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b); void QLA_D3_r_veq_re_D_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_r_xeq_re_D_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_r_veq_re_pD_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int n); void QLA_D3_r_veq_re_D_dot_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_r_veq_re_pD_dot_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int n); void QLA_D3_r_xeq_re_pD_dot_D ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict b, int *index, int n); void QLA_D3_r_xeq_re_D_dot_pD ( 
QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_r_xeq_re_pD_dot_pD ( QLA_D_Real *restrict r, QLA_D3_DiracFermion *restrict *a, QLA_D3_DiracFermion *restrict *b, int *index, int n); void QLA_D3_r_eq_re_M_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b); void QLA_D3_r_veq_re_M_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_r_xeq_re_M_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_r_veq_re_pM_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int n); void QLA_D3_r_veq_re_M_dot_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_r_veq_re_pM_dot_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int n); void QLA_D3_r_xeq_re_pM_dot_M ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict b, int *index, int n); void QLA_D3_r_xeq_re_M_dot_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_r_xeq_re_pM_dot_pM ( QLA_D_Real *restrict r, QLA_D3_ColorMatrix *restrict *a, QLA_D3_ColorMatrix *restrict *b, int *index, int n); void QLA_D3_r_eq_re_P_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b); void QLA_D3_r_veq_re_P_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int n); void QLA_D3_r_xeq_re_P_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_r_veq_re_pP_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int 
n); void QLA_D3_r_veq_re_P_dot_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_r_veq_re_pP_dot_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int n); void QLA_D3_r_xeq_re_pP_dot_P ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict b, int *index, int n); void QLA_D3_r_xeq_re_P_dot_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); void QLA_D3_r_xeq_re_pP_dot_pP ( QLA_D_Real *restrict r, QLA_D3_DiracPropagator *restrict *a, QLA_D3_DiracPropagator *restrict *b, int *index, int n); # 4990 "./qla-1.7.1/qla_d3.h" void QLA_D3_v_eq_sum_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a); void QLA_D3_v_veq_sum_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int n); void QLA_D3_v_xeq_sum_V ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int *index, int n); void QLA_D3_v_veq_sum_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int n); void QLA_D3_v_xeq_sum_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict *a, int *index, int n); void QLA_D3_h_eq_sum_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a); void QLA_D3_h_veq_sum_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int n); void QLA_D3_h_xeq_sum_H ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int *index, int n); void QLA_D3_h_veq_sum_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int n); void QLA_D3_h_xeq_sum_pH ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict *a, int *index, int n); void QLA_D3_d_eq_sum_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a); void QLA_D3_d_veq_sum_D ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int n); void QLA_D3_d_xeq_sum_D ( 
QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int *index, int n); void QLA_D3_d_veq_sum_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int n); void QLA_D3_d_xeq_sum_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict *a, int *index, int n); void QLA_D3_m_eq_sum_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_m_veq_sum_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_m_xeq_sum_M ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_m_veq_sum_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int n); void QLA_D3_m_xeq_sum_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict *a, int *index, int n); void QLA_D3_p_eq_sum_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_p_veq_sum_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_p_xeq_sum_P ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_p_veq_sum_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int n); void QLA_D3_p_xeq_sum_pP ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict *a, int *index, int n); # 5045 "./qla-1.7.1/qla_d3.h" void QLA_D3_V_eq_zero ( QLA_D3_ColorVector *restrict r); void QLA_D3_V_veq_zero ( QLA_D3_ColorVector *restrict r, int n); void QLA_D3_V_xeq_zero ( QLA_D3_ColorVector *restrict r, int *index, int n); void QLA_D3_H_eq_zero ( QLA_D3_HalfFermion *restrict r); void QLA_D3_H_veq_zero ( QLA_D3_HalfFermion *restrict r, int n); void QLA_D3_H_xeq_zero ( QLA_D3_HalfFermion *restrict r, int *index, int n); void QLA_D3_D_eq_zero ( QLA_D3_DiracFermion *restrict r); void QLA_D3_D_veq_zero ( QLA_D3_DiracFermion *restrict r, int n); void QLA_D3_D_xeq_zero ( QLA_D3_DiracFermion *restrict r, int *index, int n); void 
QLA_D3_M_eq_zero ( QLA_D3_ColorMatrix *restrict r); void QLA_D3_M_veq_zero ( QLA_D3_ColorMatrix *restrict r, int n); void QLA_D3_M_xeq_zero ( QLA_D3_ColorMatrix *restrict r, int *index, int n); void QLA_D3_P_eq_zero ( QLA_D3_DiracPropagator *restrict r); void QLA_D3_P_veq_zero ( QLA_D3_DiracPropagator *restrict r, int n); void QLA_D3_P_xeq_zero ( QLA_D3_DiracPropagator *restrict r, int *index, int n); # 5085 "./qla-1.7.1/qla_d3.h" void QLA_D3_V_eq_v ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a); void QLA_D3_V_veq_v ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int n); void QLA_D3_V_xeq_v ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorVector *restrict a, int *index, int n); void QLA_D3_H_eq_h ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a); void QLA_D3_H_veq_h ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int n); void QLA_D3_H_xeq_h ( QLA_D3_HalfFermion *restrict r, QLA_D3_HalfFermion *restrict a, int *index, int n); void QLA_D3_D_eq_d ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a); void QLA_D3_D_veq_d ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int n); void QLA_D3_D_xeq_d ( QLA_D3_DiracFermion *restrict r, QLA_D3_DiracFermion *restrict a, int *index, int n); void QLA_D3_M_eq_m ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a); void QLA_D3_M_veq_m ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int n); void QLA_D3_M_xeq_m ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, int *index, int n); void QLA_D3_P_eq_p ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a); void QLA_D3_P_veq_p ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int n); void QLA_D3_P_xeq_p ( QLA_D3_DiracPropagator *restrict r, QLA_D3_DiracPropagator *restrict a, int *index, int n); void QLA_D3_M_eq_c ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a); void QLA_D3_M_veq_c ( 
QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, int n); void QLA_D3_M_xeq_c ( QLA_D3_ColorMatrix *restrict r, QLA_D_Complex *restrict a, int *index, int n); # 5132 "./qla-1.7.1/qla_d3.h" void QLA_D3_V_eq_gaussian_S ( QLA_D3_ColorVector *restrict r, QLA_RandomState *restrict a); void QLA_D3_V_veq_gaussian_S ( QLA_D3_ColorVector *restrict r, QLA_RandomState *restrict a, int n); void QLA_D3_V_xeq_gaussian_S ( QLA_D3_ColorVector *restrict r, QLA_RandomState *restrict a, int *index, int n); void QLA_D3_V_veq_gaussian_pS ( QLA_D3_ColorVector *restrict r, QLA_RandomState *restrict *a, int n); void QLA_D3_V_xeq_gaussian_pS ( QLA_D3_ColorVector *restrict r, QLA_RandomState *restrict *a, int *index, int n); void QLA_D3_H_eq_gaussian_S ( QLA_D3_HalfFermion *restrict r, QLA_RandomState *restrict a); void QLA_D3_H_veq_gaussian_S ( QLA_D3_HalfFermion *restrict r, QLA_RandomState *restrict a, int n); void QLA_D3_H_xeq_gaussian_S ( QLA_D3_HalfFermion *restrict r, QLA_RandomState *restrict a, int *index, int n); void QLA_D3_H_veq_gaussian_pS ( QLA_D3_HalfFermion *restrict r, QLA_RandomState *restrict *a, int n); void QLA_D3_H_xeq_gaussian_pS ( QLA_D3_HalfFermion *restrict r, QLA_RandomState *restrict *a, int *index, int n); void QLA_D3_D_eq_gaussian_S ( QLA_D3_DiracFermion *restrict r, QLA_RandomState *restrict a); void QLA_D3_D_veq_gaussian_S ( QLA_D3_DiracFermion *restrict r, QLA_RandomState *restrict a, int n); void QLA_D3_D_xeq_gaussian_S ( QLA_D3_DiracFermion *restrict r, QLA_RandomState *restrict a, int *index, int n); void QLA_D3_D_veq_gaussian_pS ( QLA_D3_DiracFermion *restrict r, QLA_RandomState *restrict *a, int n); void QLA_D3_D_xeq_gaussian_pS ( QLA_D3_DiracFermion *restrict r, QLA_RandomState *restrict *a, int *index, int n); void QLA_D3_M_eq_gaussian_S ( QLA_D3_ColorMatrix *restrict r, QLA_RandomState *restrict a); void QLA_D3_M_veq_gaussian_S ( QLA_D3_ColorMatrix *restrict r, QLA_RandomState *restrict a, int n); void QLA_D3_M_xeq_gaussian_S ( 
QLA_D3_ColorMatrix *restrict r, QLA_RandomState *restrict a, int *index, int n); void QLA_D3_M_veq_gaussian_pS ( QLA_D3_ColorMatrix *restrict r, QLA_RandomState *restrict *a, int n); void QLA_D3_M_xeq_gaussian_pS ( QLA_D3_ColorMatrix *restrict r, QLA_RandomState *restrict *a, int *index, int n); void QLA_D3_P_eq_gaussian_S ( QLA_D3_DiracPropagator *restrict r, QLA_RandomState *restrict a); void QLA_D3_P_veq_gaussian_S ( QLA_D3_DiracPropagator *restrict r, QLA_RandomState *restrict a, int n); void QLA_D3_P_xeq_gaussian_S ( QLA_D3_DiracPropagator *restrict r, QLA_RandomState *restrict a, int *index, int n); void QLA_D3_P_veq_gaussian_pS ( QLA_D3_DiracPropagator *restrict r, QLA_RandomState *restrict *a, int n); void QLA_D3_P_xeq_gaussian_pS ( QLA_D3_DiracPropagator *restrict r, QLA_RandomState *restrict *a, int *index, int n); # 9 "qla-1.7.1/QLA_D3_V_vpeq_M_times_pV.c" 2 # 1 "/usr/include/math.h" 1 3 4 # 27 "/usr/include/math.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/libc-header-start.h" 1 3 4 # 28 "/usr/include/math.h" 2 3 4 # 40 "/usr/include/math.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/math-vector.h" 1 3 4 # 25 "/usr/include/x86_64-linux-gnu/bits/math-vector.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/libm-simd-decl-stubs.h" 1 3 4 # 26 "/usr/include/x86_64-linux-gnu/bits/math-vector.h" 2 3 4 # 41 "/usr/include/math.h" 2 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/floatn.h" 1 3 4 # 120 "/usr/include/x86_64-linux-gnu/bits/floatn.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/floatn-common.h" 1 3 4 # 24 "/usr/include/x86_64-linux-gnu/bits/floatn-common.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/long-double.h" 1 3 4 # 25 "/usr/include/x86_64-linux-gnu/bits/floatn-common.h" 2 3 4 # 207 "/usr/include/x86_64-linux-gnu/bits/floatn-common.h" 3 4 typedef float _Float32; # 244 "/usr/include/x86_64-linux-gnu/bits/floatn-common.h" 3 4 typedef double _Float64; # 261 "/usr/include/x86_64-linux-gnu/bits/floatn-common.h" 3 4 typedef double _Float32x; # 278 
"/usr/include/x86_64-linux-gnu/bits/floatn-common.h" 3 4 typedef long double _Float64x; # 121 "/usr/include/x86_64-linux-gnu/bits/floatn.h" 2 3 4 # 44 "/usr/include/math.h" 2 3 4 # 138 "/usr/include/math.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/flt-eval-method.h" 1 3 4 # 139 "/usr/include/math.h" 2 3 4 # 149 "/usr/include/math.h" 3 4 typedef float float_t; typedef double double_t; # 190 "/usr/include/math.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/fp-logb.h" 1 3 4 # 191 "/usr/include/math.h" 2 3 4 # 233 "/usr/include/math.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/fp-fast.h" 1 3 4 # 234 "/usr/include/math.h" 2 3 4 # 289 "/usr/include/math.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/mathcalls-helper-functions.h" 1 3 4 # 21 "/usr/include/x86_64-linux-gnu/bits/mathcalls-helper-functions.h" 3 4 extern int __fpclassify (double __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern int __signbit (double __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern int __isinf (double __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern int __finite (double __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern int __isnan (double __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern int __iseqsig (double __x, double __y) __attribute__ ((__nothrow__ )); extern int __issignaling (double __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); # 290 "/usr/include/math.h" 2 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 1 3 4 # 53 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern double acos (double __x) __attribute__ ((__nothrow__ )); extern double __acos (double __x) __attribute__ ((__nothrow__ )); extern double asin (double __x) __attribute__ ((__nothrow__ )); extern double __asin (double __x) __attribute__ ((__nothrow__ )); extern double atan (double __x) __attribute__ ((__nothrow__ )); extern double __atan (double __x) 
__attribute__ ((__nothrow__ )); extern double atan2 (double __y, double __x) __attribute__ ((__nothrow__ )); extern double __atan2 (double __y, double __x) __attribute__ ((__nothrow__ )); extern double cos (double __x) __attribute__ ((__nothrow__ )); extern double __cos (double __x) __attribute__ ((__nothrow__ )); extern double sin (double __x) __attribute__ ((__nothrow__ )); extern double __sin (double __x) __attribute__ ((__nothrow__ )); extern double tan (double __x) __attribute__ ((__nothrow__ )); extern double __tan (double __x) __attribute__ ((__nothrow__ )); extern double cosh (double __x) __attribute__ ((__nothrow__ )); extern double __cosh (double __x) __attribute__ ((__nothrow__ )); extern double sinh (double __x) __attribute__ ((__nothrow__ )); extern double __sinh (double __x) __attribute__ ((__nothrow__ )); extern double tanh (double __x) __attribute__ ((__nothrow__ )); extern double __tanh (double __x) __attribute__ ((__nothrow__ )); # 85 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern double acosh (double __x) __attribute__ ((__nothrow__ )); extern double __acosh (double __x) __attribute__ ((__nothrow__ )); extern double asinh (double __x) __attribute__ ((__nothrow__ )); extern double __asinh (double __x) __attribute__ ((__nothrow__ )); extern double atanh (double __x) __attribute__ ((__nothrow__ )); extern double __atanh (double __x) __attribute__ ((__nothrow__ )); extern double exp (double __x) __attribute__ ((__nothrow__ )); extern double __exp (double __x) __attribute__ ((__nothrow__ )); extern double frexp (double __x, int *__exponent) __attribute__ ((__nothrow__ )); extern double __frexp (double __x, int *__exponent) __attribute__ ((__nothrow__ )); extern double ldexp (double __x, int __exponent) __attribute__ ((__nothrow__ )); extern double __ldexp (double __x, int __exponent) __attribute__ ((__nothrow__ )); extern double log (double __x) __attribute__ ((__nothrow__ )); extern double __log (double __x) __attribute__ ((__nothrow__ 
)); extern double log10 (double __x) __attribute__ ((__nothrow__ )); extern double __log10 (double __x) __attribute__ ((__nothrow__ )); extern double modf (double __x, double *__iptr) __attribute__ ((__nothrow__ )); extern double __modf (double __x, double *__iptr) __attribute__ ((__nothrow__ )) __attribute__ ((__nonnull__ (2))); # 119 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern double expm1 (double __x) __attribute__ ((__nothrow__ )); extern double __expm1 (double __x) __attribute__ ((__nothrow__ )); extern double log1p (double __x) __attribute__ ((__nothrow__ )); extern double __log1p (double __x) __attribute__ ((__nothrow__ )); extern double logb (double __x) __attribute__ ((__nothrow__ )); extern double __logb (double __x) __attribute__ ((__nothrow__ )); extern double exp2 (double __x) __attribute__ ((__nothrow__ )); extern double __exp2 (double __x) __attribute__ ((__nothrow__ )); extern double log2 (double __x) __attribute__ ((__nothrow__ )); extern double __log2 (double __x) __attribute__ ((__nothrow__ )); extern double pow (double __x, double __y) __attribute__ ((__nothrow__ )); extern double __pow (double __x, double __y) __attribute__ ((__nothrow__ )); extern double sqrt (double __x) __attribute__ ((__nothrow__ )); extern double __sqrt (double __x) __attribute__ ((__nothrow__ )); extern double hypot (double __x, double __y) __attribute__ ((__nothrow__ )); extern double __hypot (double __x, double __y) __attribute__ ((__nothrow__ )); extern double cbrt (double __x) __attribute__ ((__nothrow__ )); extern double __cbrt (double __x) __attribute__ ((__nothrow__ )); extern double ceil (double __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern double __ceil (double __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern double fabs (double __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern double __fabs (double __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern double 
floor (double __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern double __floor (double __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern double fmod (double __x, double __y) __attribute__ ((__nothrow__ )); extern double __fmod (double __x, double __y) __attribute__ ((__nothrow__ )); # 177 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern int isinf (double __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern int finite (double __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern double drem (double __x, double __y) __attribute__ ((__nothrow__ )); extern double __drem (double __x, double __y) __attribute__ ((__nothrow__ )); extern double significand (double __x) __attribute__ ((__nothrow__ )); extern double __significand (double __x) __attribute__ ((__nothrow__ )); extern double copysign (double __x, double __y) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern double __copysign (double __x, double __y) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern double nan (const char *__tagb) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern double __nan (const char *__tagb) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); # 211 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern int isnan (double __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern double j0 (double) __attribute__ ((__nothrow__ )); extern double __j0 (double) __attribute__ ((__nothrow__ )); extern double j1 (double) __attribute__ ((__nothrow__ )); extern double __j1 (double) __attribute__ ((__nothrow__ )); extern double jn (int, double) __attribute__ ((__nothrow__ )); extern double __jn (int, double) __attribute__ ((__nothrow__ )); extern double y0 (double) __attribute__ ((__nothrow__ )); extern double __y0 (double) __attribute__ ((__nothrow__ )); extern double y1 (double) __attribute__ ((__nothrow__ )); extern double 
__y1 (double) __attribute__ ((__nothrow__ )); extern double yn (int, double) __attribute__ ((__nothrow__ )); extern double __yn (int, double) __attribute__ ((__nothrow__ )); extern double erf (double) __attribute__ ((__nothrow__ )); extern double __erf (double) __attribute__ ((__nothrow__ )); extern double erfc (double) __attribute__ ((__nothrow__ )); extern double __erfc (double) __attribute__ ((__nothrow__ )); extern double lgamma (double) __attribute__ ((__nothrow__ )); extern double __lgamma (double) __attribute__ ((__nothrow__ )); extern double tgamma (double) __attribute__ ((__nothrow__ )); extern double __tgamma (double) __attribute__ ((__nothrow__ )); extern double gamma (double) __attribute__ ((__nothrow__ )); extern double __gamma (double) __attribute__ ((__nothrow__ )); extern double lgamma_r (double, int *__signgamp) __attribute__ ((__nothrow__ )); extern double __lgamma_r (double, int *__signgamp) __attribute__ ((__nothrow__ )); extern double rint (double __x) __attribute__ ((__nothrow__ )); extern double __rint (double __x) __attribute__ ((__nothrow__ )); extern double nextafter (double __x, double __y) __attribute__ ((__nothrow__ )); extern double __nextafter (double __x, double __y) __attribute__ ((__nothrow__ )); extern double nexttoward (double __x, long double __y) __attribute__ ((__nothrow__ )); extern double __nexttoward (double __x, long double __y) __attribute__ ((__nothrow__ )); # 272 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern double remainder (double __x, double __y) __attribute__ ((__nothrow__ )); extern double __remainder (double __x, double __y) __attribute__ ((__nothrow__ )); extern double scalbn (double __x, int __n) __attribute__ ((__nothrow__ )); extern double __scalbn (double __x, int __n) __attribute__ ((__nothrow__ )); extern int ilogb (double __x) __attribute__ ((__nothrow__ )); extern int __ilogb (double __x) __attribute__ ((__nothrow__ )); # 290 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern double 
scalbln (double __x, long int __n) __attribute__ ((__nothrow__ )); extern double __scalbln (double __x, long int __n) __attribute__ ((__nothrow__ )); extern double nearbyint (double __x) __attribute__ ((__nothrow__ )); extern double __nearbyint (double __x) __attribute__ ((__nothrow__ )); extern double round (double __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern double __round (double __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern double trunc (double __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern double __trunc (double __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern double remquo (double __x, double __y, int *__quo) __attribute__ ((__nothrow__ )); extern double __remquo (double __x, double __y, int *__quo) __attribute__ ((__nothrow__ )); extern long int lrint (double __x) __attribute__ ((__nothrow__ )); extern long int __lrint (double __x) __attribute__ ((__nothrow__ )); __extension__ extern long long int llrint (double __x) __attribute__ ((__nothrow__ )); extern long long int __llrint (double __x) __attribute__ ((__nothrow__ )); extern long int lround (double __x) __attribute__ ((__nothrow__ )); extern long int __lround (double __x) __attribute__ ((__nothrow__ )); __extension__ extern long long int llround (double __x) __attribute__ ((__nothrow__ )); extern long long int __llround (double __x) __attribute__ ((__nothrow__ )); extern double fdim (double __x, double __y) __attribute__ ((__nothrow__ )); extern double __fdim (double __x, double __y) __attribute__ ((__nothrow__ )); extern double fmax (double __x, double __y) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern double __fmax (double __x, double __y) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern double fmin (double __x, double __y) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern double __fmin (double __x, double __y) __attribute__ ((__nothrow__ )) 
__attribute__ ((__const__)); extern double fma (double __x, double __y, double __z) __attribute__ ((__nothrow__ )); extern double __fma (double __x, double __y, double __z) __attribute__ ((__nothrow__ )); # 396 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern double scalb (double __x, double __n) __attribute__ ((__nothrow__ )); extern double __scalb (double __x, double __n) __attribute__ ((__nothrow__ )); # 291 "/usr/include/math.h" 2 3 4 # 306 "/usr/include/math.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/mathcalls-helper-functions.h" 1 3 4 # 21 "/usr/include/x86_64-linux-gnu/bits/mathcalls-helper-functions.h" 3 4 extern int __fpclassifyf (float __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern int __signbitf (float __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern int __isinff (float __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern int __finitef (float __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern int __isnanf (float __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern int __iseqsigf (float __x, float __y) __attribute__ ((__nothrow__ )); extern int __issignalingf (float __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); # 307 "/usr/include/math.h" 2 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 1 3 4 # 53 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern float acosf (float __x) __attribute__ ((__nothrow__ )); extern float __acosf (float __x) __attribute__ ((__nothrow__ )); extern float asinf (float __x) __attribute__ ((__nothrow__ )); extern float __asinf (float __x) __attribute__ ((__nothrow__ )); extern float atanf (float __x) __attribute__ ((__nothrow__ )); extern float __atanf (float __x) __attribute__ ((__nothrow__ )); extern float atan2f (float __y, float __x) __attribute__ ((__nothrow__ )); extern float __atan2f (float __y, float __x) __attribute__ ((__nothrow__ )); extern 
float cosf (float __x) __attribute__ ((__nothrow__ )); extern float __cosf (float __x) __attribute__ ((__nothrow__ )); extern float sinf (float __x) __attribute__ ((__nothrow__ )); extern float __sinf (float __x) __attribute__ ((__nothrow__ )); extern float tanf (float __x) __attribute__ ((__nothrow__ )); extern float __tanf (float __x) __attribute__ ((__nothrow__ )); extern float coshf (float __x) __attribute__ ((__nothrow__ )); extern float __coshf (float __x) __attribute__ ((__nothrow__ )); extern float sinhf (float __x) __attribute__ ((__nothrow__ )); extern float __sinhf (float __x) __attribute__ ((__nothrow__ )); extern float tanhf (float __x) __attribute__ ((__nothrow__ )); extern float __tanhf (float __x) __attribute__ ((__nothrow__ )); # 85 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern float acoshf (float __x) __attribute__ ((__nothrow__ )); extern float __acoshf (float __x) __attribute__ ((__nothrow__ )); extern float asinhf (float __x) __attribute__ ((__nothrow__ )); extern float __asinhf (float __x) __attribute__ ((__nothrow__ )); extern float atanhf (float __x) __attribute__ ((__nothrow__ )); extern float __atanhf (float __x) __attribute__ ((__nothrow__ )); extern float expf (float __x) __attribute__ ((__nothrow__ )); extern float __expf (float __x) __attribute__ ((__nothrow__ )); extern float frexpf (float __x, int *__exponent) __attribute__ ((__nothrow__ )); extern float __frexpf (float __x, int *__exponent) __attribute__ ((__nothrow__ )); extern float ldexpf (float __x, int __exponent) __attribute__ ((__nothrow__ )); extern float __ldexpf (float __x, int __exponent) __attribute__ ((__nothrow__ )); extern float logf (float __x) __attribute__ ((__nothrow__ )); extern float __logf (float __x) __attribute__ ((__nothrow__ )); extern float log10f (float __x) __attribute__ ((__nothrow__ )); extern float __log10f (float __x) __attribute__ ((__nothrow__ )); extern float modff (float __x, float *__iptr) __attribute__ ((__nothrow__ )); extern 
float __modff (float __x, float *__iptr) __attribute__ ((__nothrow__ )) __attribute__ ((__nonnull__ (2))); # 119 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern float expm1f (float __x) __attribute__ ((__nothrow__ )); extern float __expm1f (float __x) __attribute__ ((__nothrow__ )); extern float log1pf (float __x) __attribute__ ((__nothrow__ )); extern float __log1pf (float __x) __attribute__ ((__nothrow__ )); extern float logbf (float __x) __attribute__ ((__nothrow__ )); extern float __logbf (float __x) __attribute__ ((__nothrow__ )); extern float exp2f (float __x) __attribute__ ((__nothrow__ )); extern float __exp2f (float __x) __attribute__ ((__nothrow__ )); extern float log2f (float __x) __attribute__ ((__nothrow__ )); extern float __log2f (float __x) __attribute__ ((__nothrow__ )); extern float powf (float __x, float __y) __attribute__ ((__nothrow__ )); extern float __powf (float __x, float __y) __attribute__ ((__nothrow__ )); extern float sqrtf (float __x) __attribute__ ((__nothrow__ )); extern float __sqrtf (float __x) __attribute__ ((__nothrow__ )); extern float hypotf (float __x, float __y) __attribute__ ((__nothrow__ )); extern float __hypotf (float __x, float __y) __attribute__ ((__nothrow__ )); extern float cbrtf (float __x) __attribute__ ((__nothrow__ )); extern float __cbrtf (float __x) __attribute__ ((__nothrow__ )); extern float ceilf (float __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern float __ceilf (float __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern float fabsf (float __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern float __fabsf (float __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern float floorf (float __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern float __floorf (float __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern float fmodf (float __x, float __y) __attribute__ ((__nothrow__ )); 
extern float __fmodf (float __x, float __y) __attribute__ ((__nothrow__ )); # 177 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern int isinff (float __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern int finitef (float __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern float dremf (float __x, float __y) __attribute__ ((__nothrow__ )); extern float __dremf (float __x, float __y) __attribute__ ((__nothrow__ )); extern float significandf (float __x) __attribute__ ((__nothrow__ )); extern float __significandf (float __x) __attribute__ ((__nothrow__ )); extern float copysignf (float __x, float __y) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern float __copysignf (float __x, float __y) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern float nanf (const char *__tagb) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern float __nanf (const char *__tagb) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); # 211 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern int isnanf (float __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern float j0f (float) __attribute__ ((__nothrow__ )); extern float __j0f (float) __attribute__ ((__nothrow__ )); extern float j1f (float) __attribute__ ((__nothrow__ )); extern float __j1f (float) __attribute__ ((__nothrow__ )); extern float jnf (int, float) __attribute__ ((__nothrow__ )); extern float __jnf (int, float) __attribute__ ((__nothrow__ )); extern float y0f (float) __attribute__ ((__nothrow__ )); extern float __y0f (float) __attribute__ ((__nothrow__ )); extern float y1f (float) __attribute__ ((__nothrow__ )); extern float __y1f (float) __attribute__ ((__nothrow__ )); extern float ynf (int, float) __attribute__ ((__nothrow__ )); extern float __ynf (int, float) __attribute__ ((__nothrow__ )); extern float erff (float) __attribute__ ((__nothrow__ )); extern float __erff (float) __attribute__ 
((__nothrow__ )); extern float erfcf (float) __attribute__ ((__nothrow__ )); extern float __erfcf (float) __attribute__ ((__nothrow__ )); extern float lgammaf (float) __attribute__ ((__nothrow__ )); extern float __lgammaf (float) __attribute__ ((__nothrow__ )); extern float tgammaf (float) __attribute__ ((__nothrow__ )); extern float __tgammaf (float) __attribute__ ((__nothrow__ )); extern float gammaf (float) __attribute__ ((__nothrow__ )); extern float __gammaf (float) __attribute__ ((__nothrow__ )); extern float lgammaf_r (float, int *__signgamp) __attribute__ ((__nothrow__ )); extern float __lgammaf_r (float, int *__signgamp) __attribute__ ((__nothrow__ )); extern float rintf (float __x) __attribute__ ((__nothrow__ )); extern float __rintf (float __x) __attribute__ ((__nothrow__ )); extern float nextafterf (float __x, float __y) __attribute__ ((__nothrow__ )); extern float __nextafterf (float __x, float __y) __attribute__ ((__nothrow__ )); extern float nexttowardf (float __x, long double __y) __attribute__ ((__nothrow__ )); extern float __nexttowardf (float __x, long double __y) __attribute__ ((__nothrow__ )); # 272 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern float remainderf (float __x, float __y) __attribute__ ((__nothrow__ )); extern float __remainderf (float __x, float __y) __attribute__ ((__nothrow__ )); extern float scalbnf (float __x, int __n) __attribute__ ((__nothrow__ )); extern float __scalbnf (float __x, int __n) __attribute__ ((__nothrow__ )); extern int ilogbf (float __x) __attribute__ ((__nothrow__ )); extern int __ilogbf (float __x) __attribute__ ((__nothrow__ )); # 290 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern float scalblnf (float __x, long int __n) __attribute__ ((__nothrow__ )); extern float __scalblnf (float __x, long int __n) __attribute__ ((__nothrow__ )); extern float nearbyintf (float __x) __attribute__ ((__nothrow__ )); extern float __nearbyintf (float __x) __attribute__ ((__nothrow__ )); extern float 
roundf (float __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern float __roundf (float __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern float truncf (float __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern float __truncf (float __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern float remquof (float __x, float __y, int *__quo) __attribute__ ((__nothrow__ )); extern float __remquof (float __x, float __y, int *__quo) __attribute__ ((__nothrow__ )); extern long int lrintf (float __x) __attribute__ ((__nothrow__ )); extern long int __lrintf (float __x) __attribute__ ((__nothrow__ )); __extension__ extern long long int llrintf (float __x) __attribute__ ((__nothrow__ )); extern long long int __llrintf (float __x) __attribute__ ((__nothrow__ )); extern long int lroundf (float __x) __attribute__ ((__nothrow__ )); extern long int __lroundf (float __x) __attribute__ ((__nothrow__ )); __extension__ extern long long int llroundf (float __x) __attribute__ ((__nothrow__ )); extern long long int __llroundf (float __x) __attribute__ ((__nothrow__ )); extern float fdimf (float __x, float __y) __attribute__ ((__nothrow__ )); extern float __fdimf (float __x, float __y) __attribute__ ((__nothrow__ )); extern float fmaxf (float __x, float __y) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern float __fmaxf (float __x, float __y) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern float fminf (float __x, float __y) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern float __fminf (float __x, float __y) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern float fmaf (float __x, float __y, float __z) __attribute__ ((__nothrow__ )); extern float __fmaf (float __x, float __y, float __z) __attribute__ ((__nothrow__ )); # 396 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern float scalbf (float __x, float __n) __attribute__ ((__nothrow__ 
)); extern float __scalbf (float __x, float __n) __attribute__ ((__nothrow__ )); # 308 "/usr/include/math.h" 2 3 4 # 349 "/usr/include/math.h" 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/mathcalls-helper-functions.h" 1 3 4 # 21 "/usr/include/x86_64-linux-gnu/bits/mathcalls-helper-functions.h" 3 4 extern int __fpclassifyl (long double __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern int __signbitl (long double __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern int __isinfl (long double __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern int __finitel (long double __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern int __isnanl (long double __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern int __iseqsigl (long double __x, long double __y) __attribute__ ((__nothrow__ )); extern int __issignalingl (long double __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); # 350 "/usr/include/math.h" 2 3 4 # 1 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 1 3 4 # 53 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern long double acosl (long double __x) __attribute__ ((__nothrow__ )); extern long double __acosl (long double __x) __attribute__ ((__nothrow__ )); extern long double asinl (long double __x) __attribute__ ((__nothrow__ )); extern long double __asinl (long double __x) __attribute__ ((__nothrow__ )); extern long double atanl (long double __x) __attribute__ ((__nothrow__ )); extern long double __atanl (long double __x) __attribute__ ((__nothrow__ )); extern long double atan2l (long double __y, long double __x) __attribute__ ((__nothrow__ )); extern long double __atan2l (long double __y, long double __x) __attribute__ ((__nothrow__ )); extern long double cosl (long double __x) __attribute__ ((__nothrow__ )); extern long double __cosl (long double __x) __attribute__ ((__nothrow__ )); extern long double sinl (long double __x) 
__attribute__ ((__nothrow__ )); extern long double __sinl (long double __x) __attribute__ ((__nothrow__ )); extern long double tanl (long double __x) __attribute__ ((__nothrow__ )); extern long double __tanl (long double __x) __attribute__ ((__nothrow__ )); extern long double coshl (long double __x) __attribute__ ((__nothrow__ )); extern long double __coshl (long double __x) __attribute__ ((__nothrow__ )); extern long double sinhl (long double __x) __attribute__ ((__nothrow__ )); extern long double __sinhl (long double __x) __attribute__ ((__nothrow__ )); extern long double tanhl (long double __x) __attribute__ ((__nothrow__ )); extern long double __tanhl (long double __x) __attribute__ ((__nothrow__ )); # 85 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern long double acoshl (long double __x) __attribute__ ((__nothrow__ )); extern long double __acoshl (long double __x) __attribute__ ((__nothrow__ )); extern long double asinhl (long double __x) __attribute__ ((__nothrow__ )); extern long double __asinhl (long double __x) __attribute__ ((__nothrow__ )); extern long double atanhl (long double __x) __attribute__ ((__nothrow__ )); extern long double __atanhl (long double __x) __attribute__ ((__nothrow__ )); extern long double expl (long double __x) __attribute__ ((__nothrow__ )); extern long double __expl (long double __x) __attribute__ ((__nothrow__ )); extern long double frexpl (long double __x, int *__exponent) __attribute__ ((__nothrow__ )); extern long double __frexpl (long double __x, int *__exponent) __attribute__ ((__nothrow__ )); extern long double ldexpl (long double __x, int __exponent) __attribute__ ((__nothrow__ )); extern long double __ldexpl (long double __x, int __exponent) __attribute__ ((__nothrow__ )); extern long double logl (long double __x) __attribute__ ((__nothrow__ )); extern long double __logl (long double __x) __attribute__ ((__nothrow__ )); extern long double log10l (long double __x) __attribute__ ((__nothrow__ )); extern long 
double __log10l (long double __x) __attribute__ ((__nothrow__ )); extern long double modfl (long double __x, long double *__iptr) __attribute__ ((__nothrow__ )); extern long double __modfl (long double __x, long double *__iptr) __attribute__ ((__nothrow__ )) __attribute__ ((__nonnull__ (2))); # 119 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern long double expm1l (long double __x) __attribute__ ((__nothrow__ )); extern long double __expm1l (long double __x) __attribute__ ((__nothrow__ )); extern long double log1pl (long double __x) __attribute__ ((__nothrow__ )); extern long double __log1pl (long double __x) __attribute__ ((__nothrow__ )); extern long double logbl (long double __x) __attribute__ ((__nothrow__ )); extern long double __logbl (long double __x) __attribute__ ((__nothrow__ )); extern long double exp2l (long double __x) __attribute__ ((__nothrow__ )); extern long double __exp2l (long double __x) __attribute__ ((__nothrow__ )); extern long double log2l (long double __x) __attribute__ ((__nothrow__ )); extern long double __log2l (long double __x) __attribute__ ((__nothrow__ )); extern long double powl (long double __x, long double __y) __attribute__ ((__nothrow__ )); extern long double __powl (long double __x, long double __y) __attribute__ ((__nothrow__ )); extern long double sqrtl (long double __x) __attribute__ ((__nothrow__ )); extern long double __sqrtl (long double __x) __attribute__ ((__nothrow__ )); extern long double hypotl (long double __x, long double __y) __attribute__ ((__nothrow__ )); extern long double __hypotl (long double __x, long double __y) __attribute__ ((__nothrow__ )); extern long double cbrtl (long double __x) __attribute__ ((__nothrow__ )); extern long double __cbrtl (long double __x) __attribute__ ((__nothrow__ )); extern long double ceill (long double __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern long double __ceill (long double __x) __attribute__ ((__nothrow__ )) __attribute__ 
((__const__)); extern long double fabsl (long double __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern long double __fabsl (long double __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern long double floorl (long double __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern long double __floorl (long double __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern long double fmodl (long double __x, long double __y) __attribute__ ((__nothrow__ )); extern long double __fmodl (long double __x, long double __y) __attribute__ ((__nothrow__ )); # 177 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern int isinfl (long double __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern int finitel (long double __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern long double dreml (long double __x, long double __y) __attribute__ ((__nothrow__ )); extern long double __dreml (long double __x, long double __y) __attribute__ ((__nothrow__ )); extern long double significandl (long double __x) __attribute__ ((__nothrow__ )); extern long double __significandl (long double __x) __attribute__ ((__nothrow__ )); extern long double copysignl (long double __x, long double __y) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern long double __copysignl (long double __x, long double __y) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern long double nanl (const char *__tagb) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern long double __nanl (const char *__tagb) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); # 211 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern int isnanl (long double __value) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern long double j0l (long double) __attribute__ ((__nothrow__ )); extern long double __j0l (long double) __attribute__ ((__nothrow__ )); extern 
long double j1l (long double) __attribute__ ((__nothrow__ )); extern long double __j1l (long double) __attribute__ ((__nothrow__ )); extern long double jnl (int, long double) __attribute__ ((__nothrow__ )); extern long double __jnl (int, long double) __attribute__ ((__nothrow__ )); extern long double y0l (long double) __attribute__ ((__nothrow__ )); extern long double __y0l (long double) __attribute__ ((__nothrow__ )); extern long double y1l (long double) __attribute__ ((__nothrow__ )); extern long double __y1l (long double) __attribute__ ((__nothrow__ )); extern long double ynl (int, long double) __attribute__ ((__nothrow__ )); extern long double __ynl (int, long double) __attribute__ ((__nothrow__ )); extern long double erfl (long double) __attribute__ ((__nothrow__ )); extern long double __erfl (long double) __attribute__ ((__nothrow__ )); extern long double erfcl (long double) __attribute__ ((__nothrow__ )); extern long double __erfcl (long double) __attribute__ ((__nothrow__ )); extern long double lgammal (long double) __attribute__ ((__nothrow__ )); extern long double __lgammal (long double) __attribute__ ((__nothrow__ )); extern long double tgammal (long double) __attribute__ ((__nothrow__ )); extern long double __tgammal (long double) __attribute__ ((__nothrow__ )); extern long double gammal (long double) __attribute__ ((__nothrow__ )); extern long double __gammal (long double) __attribute__ ((__nothrow__ )); extern long double lgammal_r (long double, int *__signgamp) __attribute__ ((__nothrow__ )); extern long double __lgammal_r (long double, int *__signgamp) __attribute__ ((__nothrow__ )); extern long double rintl (long double __x) __attribute__ ((__nothrow__ )); extern long double __rintl (long double __x) __attribute__ ((__nothrow__ )); extern long double nextafterl (long double __x, long double __y) __attribute__ ((__nothrow__ )); extern long double __nextafterl (long double __x, long double __y) __attribute__ ((__nothrow__ )); extern long double 
nexttowardl (long double __x, long double __y) __attribute__ ((__nothrow__ )); extern long double __nexttowardl (long double __x, long double __y) __attribute__ ((__nothrow__ )); # 272 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern long double remainderl (long double __x, long double __y) __attribute__ ((__nothrow__ )); extern long double __remainderl (long double __x, long double __y) __attribute__ ((__nothrow__ )); extern long double scalbnl (long double __x, int __n) __attribute__ ((__nothrow__ )); extern long double __scalbnl (long double __x, int __n) __attribute__ ((__nothrow__ )); extern int ilogbl (long double __x) __attribute__ ((__nothrow__ )); extern int __ilogbl (long double __x) __attribute__ ((__nothrow__ )); # 290 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern long double scalblnl (long double __x, long int __n) __attribute__ ((__nothrow__ )); extern long double __scalblnl (long double __x, long int __n) __attribute__ ((__nothrow__ )); extern long double nearbyintl (long double __x) __attribute__ ((__nothrow__ )); extern long double __nearbyintl (long double __x) __attribute__ ((__nothrow__ )); extern long double roundl (long double __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern long double __roundl (long double __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern long double truncl (long double __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern long double __truncl (long double __x) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern long double remquol (long double __x, long double __y, int *__quo) __attribute__ ((__nothrow__ )); extern long double __remquol (long double __x, long double __y, int *__quo) __attribute__ ((__nothrow__ )); extern long int lrintl (long double __x) __attribute__ ((__nothrow__ )); extern long int __lrintl (long double __x) __attribute__ ((__nothrow__ )); __extension__ extern long long int llrintl (long double __x) 
__attribute__ ((__nothrow__ )); extern long long int __llrintl (long double __x) __attribute__ ((__nothrow__ )); extern long int lroundl (long double __x) __attribute__ ((__nothrow__ )); extern long int __lroundl (long double __x) __attribute__ ((__nothrow__ )); __extension__ extern long long int llroundl (long double __x) __attribute__ ((__nothrow__ )); extern long long int __llroundl (long double __x) __attribute__ ((__nothrow__ )); extern long double fdiml (long double __x, long double __y) __attribute__ ((__nothrow__ )); extern long double __fdiml (long double __x, long double __y) __attribute__ ((__nothrow__ )); extern long double fmaxl (long double __x, long double __y) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern long double __fmaxl (long double __x, long double __y) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern long double fminl (long double __x, long double __y) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern long double __fminl (long double __x, long double __y) __attribute__ ((__nothrow__ )) __attribute__ ((__const__)); extern long double fmal (long double __x, long double __y, long double __z) __attribute__ ((__nothrow__ )); extern long double __fmal (long double __x, long double __y, long double __z) __attribute__ ((__nothrow__ )); # 396 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 3 4 extern long double scalbl (long double __x, long double __n) __attribute__ ((__nothrow__ )); extern long double __scalbl (long double __x, long double __n) __attribute__ ((__nothrow__ )); # 351 "/usr/include/math.h" 2 3 4 # 489 "/usr/include/math.h" 3 4 extern int signgam; # 569 "/usr/include/math.h" 3 4 enum { FP_NAN = 0, FP_INFINITE = 1, FP_ZERO = 2, FP_SUBNORMAL = 3, FP_NORMAL = 4 }; # 10 "qla-1.7.1/QLA_D3_V_vpeq_M_times_pV.c" 2 void QLA_D3_V_vpeq_M_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict *b, int n) { #pragma omp parallel for for(int i=0; 
i<n; i++) { for(int i_c=0; i_c<3; i_c++) { QLA_D_Complex x; (x) = (((r[i])[i_c])); for(int k_c=0; k_c<3; k_c++) { (x) += (((a[i])[i_c][k_c])) * (((*b[i])[k_c])); } (((r[i])[i_c])) = (x); } } }
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 8; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
core_zhemm.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> c * **/ #include <plasma_core_blas.h> #include "plasma_types.h" #include "core_lapack.h" /***************************************************************************//** * * @ingroup core_hemm * * Performs one of the matrix-matrix operations * * \f[ C = \alpha \times A \times B + \beta \times C \f] * or * \f[ C = \alpha \times B \times A + \beta \times C \f] * * where alpha and beta are scalars, A is a Hermitian matrix and B and * C are m-by-n matrices. * ******************************************************************************* * * @param[in] side * Specifies whether the Hermitian matrix A appears on the * left or right in the operation as follows: * - PlasmaLeft: \f[ C = \alpha \times A \times B + \beta \times C \f] * - PlasmaRight: \f[ C = \alpha \times B \times A + \beta \times C \f] * * @param[in] uplo * Specifies whether the upper or lower triangular part of * the Hermitian matrix A is to be referenced as follows: * - PlasmaLower: Only the lower triangular part of the * Hermitian matrix A is to be referenced. * - PlasmaUpper: Only the upper triangular part of the * Hermitian matrix A is to be referenced. * * @param[in] m * The number of rows of the matrix C. m >= 0. * * @param[in] n * The number of columns of the matrix C. n >= 0. * * @param[in] alpha * The scalar alpha. * * @param[in] A * A is an lda-by-ka matrix, where ka is m when side = PlasmaLeft, * and is n otherwise. Only the uplo triangular part is referenced. * * @param[in] lda * The leading dimension of the array A. lda >= max(1,ka). * * @param[in] B * B is an ldb-by-n matrix, where the leading m-by-n part of * the array B must contain the matrix B. * * @param[in] ldb * The leading dimension of the array B. ldb >= max(1,m). * * @param[in] beta * The scalar beta. * * @param[in,out] C * C is an ldc-by-n matrix. 
 *          On exit, the array is overwritten by the m-by-n updated matrix.
 *
 * @param[in] ldc
 *          The leading dimension of the array C. ldc >= max(1,m).
 *
 ******************************************************************************/
/* Sequential kernel: delegates directly to CBLAS zhemm (column-major).
 * Declared weak so that an optimized implementation can override it at
 * link time. */
__attribute__((weak))
void plasma_core_zhemm(plasma_enum_t side, plasma_enum_t uplo,
                       int m, int n,
                       plasma_complex64_t alpha, const plasma_complex64_t *A, int lda,
                                                 const plasma_complex64_t *B, int ldb,
                       plasma_complex64_t beta,        plasma_complex64_t *C, int ldc)
{
    cblas_zhemm(CblasColMajor,
                (CBLAS_SIDE)side, (CBLAS_UPLO)uplo,
                m, n,
                CBLAS_SADDR(alpha), A, lda,
                                    B, ldb,
                CBLAS_SADDR(beta),  C, ldc);
}

/******************************************************************************/
/* OpenMP task wrapper around plasma_core_zhemm(). Registers data
 * dependences on the A, B (read) and C (read-write) tiles so the runtime
 * can schedule this hemm relative to other tasks touching the same tiles.
 * Same parameters as plasma_core_zhemm(), plus the PLASMA sequence/request
 * pair used for asynchronous error propagation. */
void plasma_core_omp_zhemm(
    plasma_enum_t side, plasma_enum_t uplo,
    int m, int n,
    plasma_complex64_t alpha, const plasma_complex64_t *A, int lda,
                              const plasma_complex64_t *B, int ldb,
    plasma_complex64_t beta,        plasma_complex64_t *C, int ldc,
    plasma_sequence_t *sequence, plasma_request_t *request)
{
    /* ak = order of the Hermitian matrix A:
     * m when A multiplies from the left, n when from the right. */
    int ak;
    if (side == PlasmaLeft)
        ak = m;
    else
        ak = n;

    #pragma omp task depend(in:A[0:lda*ak]) \
                     depend(in:B[0:ldb*n]) \
                     depend(inout:C[0:ldc*n])
    {
        /* Skip the computation if an earlier task in this sequence failed. */
        if (sequence->status == PlasmaSuccess)
            plasma_core_zhemm(side, uplo,
                              m, n,
                              alpha, A, lda,
                                     B, ldb,
                              beta,  C, ldc);
    }
}
DRB052-indirectaccesssharebase-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
 * This example mimics a memory access pattern extracted from an LLNL proxy
 * app. Two pointers with a small fixed offset are used as base addresses of
 * two array views, indexed through an index set. No two indices in the set
 * are close enough for the two views to touch the same element, so the
 * update loop has no loop-carried dependence.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#define N 180
#include <omp.h>

/* Index set: every entry is odd and entries are pairwise at least 2 apart,
 * so base[idx] and (base+1)[idx'] can never alias. */
int indexSet[N] = {
     521,  523,  525,  527,  529,  531,  547,  549,  551,  553,  555,  557,
     573,  575,  577,  579,  581,  583,  599,  601,  603,  605,  607,  609,
     625,  627,  629,  631,  633,  635,  651,  653,  655,  657,  659,  661,
     859,  861,  863,  865,  867,  869,  885,  887,  889,  891,  893,  895,
     911,  913,  915,  917,  919,  921,  937,  939,  941,  943,  945,  947,
     963,  965,  967,  969,  971,  973,  989,  991,  993,  995,  997,  999,
    1197, 1199, 1201, 1203, 1205, 1207, 1223, 1225, 1227, 1229, 1231, 1233,
    1249, 1251, 1253, 1255, 1257, 1259, 1275, 1277, 1279, 1281, 1283, 1285,
    1301, 1303, 1305, 1307, 1309, 1311, 1327, 1329, 1331, 1333, 1335, 1337,
    1535, 1537, 1539, 1541, 1543, 1545, 1561, 1563, 1565, 1567, 1569, 1571,
    1587, 1589, 1591, 1593, 1595, 1597, 1613, 1615, 1617, 1619, 1621, 1623,
    1639, 1641, 1643, 1645, 1647, 1649, 1665, 1667, 1669, 1671, 1673, 1675,
    1873, 1875, 1877, 1879, 1881, 1883, 1899, 1901, 1903, 1905, 1907, 1909,
    1925, 1927, 1929, 1931, 1933, 1935, 1951, 1953, 1955, 1957, 1959, 1961,
    1977, 1979, 1981, 1983, 1985, 1987, 2003, 2005, 2007, 2009, 2011, 2013};

int main(int argc, char *argv[])
{
    /* 2013 is the largest index used; +1 for zero-based length, +12 pad. */
    double *base = (double *)(malloc(sizeof(double) * (2013 + 1 + 12)));
    if (base == 0) {
        printf("Error, malloc() returns NULL. End execution. \n");
        return 1;
    }

    double *viewA = base;      /* first view: offset 0 */
    double *viewB = base + 1;  /* second view: offset 1 */

    /* Clear the touched index range in parallel. */
#pragma omp parallel for
    for (int pos = 521; pos <= 2025; pos++) {
        base[pos] = 0.0;
    }

    /* This loop carries no dependence: the index set never contains two
     * indices one apart, so viewA and viewB never hit the same element. */
    for (int slot = 0; slot < N; slot++) {
        int where = indexSet[slot];
        viewA[where] += 4.0;
        viewB[where] += 4.0;
    }

    /* Verify the results: no overlap of viewA vs. viewB, and no element is
     * incremented twice. */
    for (int pos = 521; pos <= 2025; pos++) {
        printf("%f ", base[pos]);
        // assert (base[pos] != 4.0);
    }

    free(base);
    return 0;
}
maxwell_zeroBC.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "_hypre_sstruct_ls.h"

/* Zero the entries of the parallel vector v at the local indices listed in
 * rows (boundary-condition rows), by delegating to the sequential kernel on
 * the vector's local part. Always returns 0. */
HYPRE_Int
hypre_ParVectorZeroBCValues(hypre_ParVector *v,
                            HYPRE_Int       *rows,
                            HYPRE_Int        nrows)
{
   HYPRE_Int ierr = 0;
   hypre_Vector *v_local = hypre_ParVectorLocalVector(v);

   hypre_SeqVectorZeroBCValues(v_local, rows, nrows);

   return ierr;
}

/* Set vector_data[rows[i]] = 0.0 for each of the nrows listed indices.
 * Parallelized over the index list; assumes rows contains distinct
 * indices so iterations are independent -- TODO confirm with callers.
 * Always returns 0. */
HYPRE_Int
hypre_SeqVectorZeroBCValues(hypre_Vector *v,
                            HYPRE_Int    *rows,
                            HYPRE_Int     nrows)
{
   HYPRE_Real *vector_data = hypre_VectorData(v);
   HYPRE_Int   i;
   HYPRE_Int   ierr = 0;

#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < nrows; i++)
      vector_data[rows[i]] = 0.0;

   return ierr;
}
GB_unaryop__ainv_bool_bool.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_bool_bool // op(A') function: GB_tran__ainv_bool_bool // C type: bool // A type: bool // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ bool #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_bool_bool ( bool *restrict Cx, const bool *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_bool_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 8; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
omp.c
/*
 * Minimal OpenMP demo: request 16 threads, have each thread in a parallel
 * region report its id and the team size, and print the wall-clock time
 * spent around the region.
 */
#include <omp.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    omp_set_num_threads(16);

    const double t_begin = omp_get_wtime();

    #pragma omp parallel
    {
        /* Region-local variables take the place of the original's
         * private(num_threads, thread_num) clause -- same semantics. */
        const int team_size = omp_get_num_threads();
        const int my_id = omp_get_thread_num();
        printf("OpenMP thread %d from %d threads \n", my_id, team_size);
    }

    const double t_end = omp_get_wtime();

    // Print time.
    printf("total time: %f\n", (t_end - t_begin));
    return 0;
}
sample_task_single_producer.c
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 * See LICENSE.txt in top-level directory.
 */

#include <omp.h>
#include <stdio.h>
#include <stdlib.h>   /* atoi, malloc, free -- were implicitly declared before */
#include <sys/time.h>

/*
 * Single-producer OpenMP task microbenchmark: one thread creates num tasks,
 * each scaling one array element by 0.9; prints "<nthreads> <seconds>" and
 * verifies the result. Returns 0 on success, 1 on failure.
 */
int main(int argc, char * argv[])
{
    int i, num = (argc > 1) ? atoi(argv[1]) : 100;
    int nthreads;
    struct timeval t_start, t_end;
    double time;

    double *a = (double *)malloc(sizeof(double) * num);
    if (a == NULL) {
        return 1;   /* allocation failure was previously unchecked */
    }

    #pragma omp parallel
    {
        nthreads = omp_get_num_threads();
    }

    for (i = 0; i < num; i++) {
        a[i] = i;
    }

    gettimeofday(&t_start, NULL);
    #pragma omp parallel
    {
        #pragma omp single
        {
            for (i = 0; i < num; i++) {
                /* Fix: i is shared in the enclosing contexts, so it was
                 * shared inside the task by default -- every task raced on
                 * the producer's loop counter and could scale the wrong
                 * element. firstprivate(i) snapshots i at task creation. */
                #pragma omp task firstprivate(i)
                {
                    a[i] *= 0.9;
                }
            }
        }
    }
    gettimeofday(&t_end, NULL);

    time = (t_end.tv_sec * 1000000 + t_end.tv_usec) -
           (t_start.tv_sec * 1000000 + t_start.tv_usec);
    printf("%d %f\n", nthreads, time / 1000000.0);

    /* Verify: exact comparison is fine because both sides compute i*0.9
     * the same way in double precision. */
    for (i = 0; i < num; i++) {
        if (a[i] != i * 0.9) {
            printf("a[%d]=%f != %f\n", i, a[i], i * 0.9);
            free(a);
            return 1;
        }
    }

    free(a);   /* was leaked before */
    return 0;
}
GB_binop__eq_uint8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// This instance specializes the EQ operator for uint8_t inputs producing a
// bool result: cij = (aij == bij).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):       GB (_AaddB__eq_uint8)
// A.*B function (eWiseMult):     GB (_AemultB_08__eq_uint8)
// A.*B function (eWiseMult):     GB (_AemultB_02__eq_uint8)
// A.*B function (eWiseMult):     GB (_AemultB_04__eq_uint8)
// A.*B function (eWiseMult):     GB (_AemultB_bitmap__eq_uint8)
// A*D function (colscale):       GB (_AxD__eq_uint8)
// D*A function (rowscale):       GB (_DxB__eq_uint8)
// C+=B function (dense accum):   GB (_Cdense_accumB__eq_uint8)
// C+=b function (dense accum):   GB (_Cdense_accumb__eq_uint8)
// C+=A+B function (dense ewise3):    GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_uint8)
// C=scalar+B    GB (_bind1st__eq_uint8)
// C=scalar+B'   GB (_bind1st_tran__eq_uint8)
// C=A+scalar    GB (_bind2nd__eq_uint8)
// C=A'+scalar   GB (_bind2nd_tran__eq_uint8)

// C type:   bool
// A type:   uint8_t
// A pattern? 0
// B type:   uint8_t
// B pattern? 0

// BinaryOp: cij = (aij == bij)

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (when disabled, every kernel below compiles to a stub returning
// GrB_NO_VALUE so the generic kernel is used instead)
#define GB_DISABLE \
    (GxB_NO_EQ || GxB_NO_UINT8 || GxB_NO_EQ_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__eq_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__eq_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__eq_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__eq_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__eq_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__eq_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint8_t alpha_scalar ;
    uint8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__eq_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__eq_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__eq_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__eq_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__eq_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__eq_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x == aij) ;                      \
}

GrB_Info GB (_bind1st_tran__eq_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij == y) ;                      \
}

GrB_Info GB (_bind2nd_tran__eq_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
comms.h
/*
//@HEADER
// *****************************************************************************
//
//  XtraPuLP: Xtreme-Scale Graph Partitioning using Label Propagation
//              Copyright (2016) Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions?  Contact  George M. Slota   (gmslota@sandia.gov)
//                      Siva Rajamanickam (srajama@sandia.gov)
//                      Kamesh Madduri    (madduri@cse.psu.edu)
//
// *****************************************************************************
//@HEADER
*/

// Hybrid MPI+OpenMP communication helpers: per-thread staging queues that are
// flushed into shared buffers with atomic-capture offsets, plus chunked
// Alltoallv exchanges of vertex ids and per-vertex data between ranks.

#ifndef _COMMS_H_
#define _COMMS_H_

#include <mpi.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>

#include "bicc_dist.h"
#include "util.h"

extern int procid, nprocs;
extern bool verbose, debug, verify;

// Exchanges larger than this many entries are split into multiple rounds.
#define MAX_SEND_SIZE 2147483648
// Capacity of each thread-local staging queue before it is flushed.
#define THREAD_QUEUE_SIZE 1024

// Per-rank MPI exchange state: counts/displacements and send/recv buffers.
struct mpi_data_t {
  int32_t* sendcounts;
  uint64_t* sendcounts_temp;
  int32_t* recvcounts;
  uint64_t* recvcounts_temp;
  int32_t* sdispls;
  int32_t* rdispls;
  int32_t* sdispls_cpy;
  uint64_t* sdispls_temp;

  uint64_t* sendbuf_vert;
  int32_t* sendbuf_data;
  uint64_t* recvbuf_vert;
  int32_t* recvbuf_data;

  uint64_t total_recv;
  uint64_t total_send;
  uint64_t global_queue_size;
};

// Double-buffered global work queue plus the outgoing-send staging area.
struct queue_data_t {
  uint64_t* queue;
  uint64_t* queue_next;
  uint64_t* queue_send;

  uint64_t queue_size;
  uint64_t next_size;
  uint64_t send_size;
};

// Thread-local queue/send buffers, flushed in THREAD_QUEUE_SIZE batches.
struct thread_queue_t {
  int32_t tid;
  uint64_t* thread_queue;
  uint64_t* thread_send;
  uint64_t thread_queue_size;
  uint64_t thread_send_size;
};

// Thread-local (vertex, data, destination-rank) staging for vid-data sends.
struct thread_comm_t {
  int32_t tid;
  bool* v_to_rank;
  uint64_t* sendcounts_thread;
  uint64_t* sendbuf_vert_thread;
  int32_t* sendbuf_data_thread;
  int32_t* sendbuf_rank_thread;
  uint64_t* thread_starts;
  uint64_t thread_queue_size;
};

void init_queue_data(dist_graph_t* g, queue_data_t* q);
void clear_queue_data(queue_data_t* q);
void init_comm_data(mpi_data_t* comm);
void clear_comm_data(mpi_data_t* comm);

void init_thread_queue(thread_queue_t* tq);
void clear_thread_queue(thread_queue_t* tq);
void init_thread_comm(thread_comm_t* tc);
void clear_thread_comm(thread_comm_t* tc);
void init_sendbuf_vid_data(mpi_data_t* comm);
void clear_recvbuf_vid_data(mpi_data_t* comm);

inline void exchange_verts(dist_graph_t* g, mpi_data_t* comm,
                           queue_data_t* q);
inline void exchange_vert_data(dist_graph_t* g, mpi_data_t* comm,
                               queue_data_t* q);
// NOTE(review): this prototype declares `int32_t* data` but the definition
// below takes `int32_t data` — in C++ these are distinct overloads and this
// declaration appears unused; confirm which signature is intended.
inline void update_sendcounts_thread(dist_graph_t* g, thread_comm_t* tc,
                                     uint64_t vert_index);
inline void update_vid_data_queues(dist_graph_t* g, thread_comm_t* tc,
                                   mpi_data_t* comm, uint64_t vert_index,
                                   int32_t* data);
inline void update_sendcounts_thread_ghost(dist_graph_t* g, thread_comm_t* tc,
                                           uint64_t vert_index);
inline void update_vid_data_queues_ghost(dist_graph_t* g, thread_comm_t* tc,
                                         mpi_data_t* comm, uint64_t vert_index,
                                         int32_t* data);

inline void add_vid_to_queue(thread_queue_t* tq, queue_data_t* q,
                             uint64_t vertex_id);
inline void empty_queue(thread_queue_t* tq, queue_data_t* q);

inline void add_vid_to_send(thread_queue_t* tq, queue_data_t* q,
                            uint64_t vertex_id);
inline void empty_send(thread_queue_t* tq, queue_data_t* q);

inline void add_vid_data_to_send(thread_comm_t* tc, mpi_data_t* comm,
                                 uint64_t vertex_id, int32_t data_val,
                                 int32_t send_rank);
inline void empty_vid_data(thread_comm_t* tc, mpi_data_t* comm);

// Exchange queued ghost vertices with their owning ranks, in num_comms
// rounds so no single round exceeds MAX_SEND_SIZE entries; received ids are
// appended to queue_next, then the queue buffers are swapped.
inline void exchange_verts(dist_graph_t* g, mpi_data_t* comm,
                           queue_data_t* q)
{
  comm->global_queue_size = 0;
  uint64_t task_queue_size = q->next_size + q->send_size;
  MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1,
                MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);

  uint64_t num_comms = comm->global_queue_size / (uint64_t)MAX_SEND_SIZE + 1;
  uint64_t sum_recv = 0;
  for (uint64_t c = 0; c < num_comms; ++c)
  {
    // this round handles queue_send[send_begin .. send_end)
    uint64_t send_begin = (q->send_size * c) / num_comms;
    uint64_t send_end = (q->send_size * (c + 1)) / num_comms;
    if (c == (num_comms-1))
      send_end = q->send_size;

    for (int32_t i = 0; i < nprocs; ++i)
    {
      comm->sendcounts[i] = 0;
      comm->recvcounts[i] = 0;
    }

    // count how many ghosts go to each owning rank
    for (uint64_t i = send_begin; i < send_end; ++i)
    {
      uint64_t ghost_index = q->queue_send[i] - g->n_local;
      uint64_t ghost_task = g->ghost_tasks[ghost_index];
      ++comm->sendcounts[ghost_task];
    }

    MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T,
                 comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD);

    comm->sdispls[0] = 0;
    comm->sdispls_cpy[0] = 0;
    comm->rdispls[0] = 0;
    for (int32_t i = 1; i < nprocs; ++i)
    {
      comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1];
      comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1];
      comm->sdispls_cpy[i] = comm->sdispls[i];
    }

    int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1];
    int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1];
    comm->sendbuf_vert =
      (uint64_t*)malloc((uint64_t)(cur_send+1)*sizeof(uint64_t));
    if (comm->sendbuf_vert == NULL)
      throw_err("exchange_verts(), unable to allocate comm buffers", procid);

    // pack global ids of the ghosts, bucketed by owning rank
    for (uint64_t i = send_begin; i < send_end; ++i)
    {
      uint64_t ghost_index = q->queue_send[i] - g->n_local;
      uint64_t ghost_task = g->ghost_tasks[ghost_index];
      uint64_t vert = g->ghost_unmap[ghost_index];
      comm->sendbuf_vert[comm->sdispls_cpy[ghost_task]++] = vert;
    }

    MPI_Alltoallv(comm->sendbuf_vert, comm->sendcounts,
                  comm->sdispls, MPI_UINT64_T,
                  q->queue_next+q->next_size+sum_recv, comm->recvcounts,
                  comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD);

    free(comm->sendbuf_vert);
    sum_recv += cur_recv;
  }

  // swap the double-buffered queues for the next iteration
  q->queue_size = q->next_size + sum_recv;
  q->next_size = 0;
  q->send_size = 0;
  uint64_t* temp = q->queue;
  q->queue = q->queue_next;
  q->queue_next = temp;
}

// Exchange (vertex id, data) pairs staged in comm->sendbuf_vert/_data,
// chunked like exchange_verts(); results land in comm->recvbuf_vert/_data.
inline void exchange_vert_data(dist_graph_t* g, mpi_data_t* comm,
                               queue_data_t* q)
{
  for (int32_t i = 0; i < nprocs; ++i)
    comm->recvcounts_temp[i] = 0;

  // rewind sdispls_temp from end-offsets back to start-offsets
  for (int32_t i = 0; i < nprocs; ++i)
    comm->sdispls_temp[i] -= comm->sendcounts_temp[i];

  MPI_Alltoall(comm->sendcounts_temp, 1, MPI_UINT64_T,
               comm->recvcounts_temp, 1, MPI_UINT64_T, MPI_COMM_WORLD);

  comm->total_recv = 0;
  for (int i = 0; i < nprocs; ++i)
    comm->total_recv += comm->recvcounts_temp[i];

  comm->recvbuf_vert = (uint64_t*)malloc(comm->total_recv*sizeof(uint64_t));
  comm->recvbuf_data = (int32_t*)malloc(comm->total_recv*sizeof(uint32_t));
  if (comm->recvbuf_vert == NULL || comm->sendbuf_vert == NULL)
    throw_err("exchange_vert_data() unable to allocate comm buffers", procid);

  comm->global_queue_size = 0;
  uint64_t task_queue_size = comm->total_send;
  MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1,
                MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);

  uint64_t num_comms = comm->global_queue_size / (uint64_t)MAX_SEND_SIZE + 1;
  uint64_t sum_recv = 0;
  uint64_t sum_send = 0;
  for (uint64_t c = 0; c < num_comms; ++c)
  {
    for (int32_t i = 0; i < nprocs; ++i)
    {
      uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms;
      uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms;
      if (c == (num_comms-1))
        send_end = comm->sendcounts_temp[i];
      comm->sendcounts[i] = (int32_t)(send_end - send_begin);
      assert(comm->sendcounts[i] >= 0);
    }

    MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T,
                 comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD);

    comm->sdispls[0] = 0;
    comm->sdispls_cpy[0] = 0;
    comm->rdispls[0] = 0;
    for (int32_t i = 1; i < nprocs; ++i)
    {
      comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1];
      comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1];
      comm->sdispls_cpy[i] = comm->sdispls[i];
    }

    int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1];
    int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1];
    uint64_t* buf_v = (uint64_t*)malloc((uint64_t)(cur_send)*sizeof(uint64_t));
    int32_t* buf_d = (int32_t*)malloc((int32_t)(cur_send)*sizeof(int32_t));
    if (buf_v == NULL || buf_d == NULL)
      throw_err("exchange_verts(), unable to allocate comm buffers", procid);

    // pack this round's slice of each rank's staged pairs
    for (int32_t i = 0; i < nprocs; ++i)
    {
      uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms;
      uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms;
      if (c == (num_comms-1))
        send_end = comm->sendcounts_temp[i];

      for (uint64_t j = send_begin; j < send_end; ++j)
      {
        uint64_t vert = comm->sendbuf_vert[comm->sdispls_temp[i]+j];
        int32_t data = comm->sendbuf_data[comm->sdispls_temp[i]+j];
        buf_v[comm->sdispls_cpy[i]] = vert;
        buf_d[comm->sdispls_cpy[i]++] = data;
      }
    }

    MPI_Alltoallv(buf_v, comm->sendcounts,
                  comm->sdispls, MPI_UINT64_T,
                  comm->recvbuf_vert+sum_recv, comm->recvcounts,
                  comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD);
    MPI_Alltoallv(buf_d, comm->sendcounts,
                  comm->sdispls, MPI_INT32_T,
                  comm->recvbuf_data+sum_recv, comm->recvcounts,
                  comm->rdispls, MPI_INT32_T, MPI_COMM_WORLD);

    free(buf_v);
    free(buf_d);
    sum_recv += cur_recv;
    sum_send += cur_send;
  }

  free(comm->sendbuf_data);
  free(comm->sendbuf_vert);
  assert(sum_recv == comm->total_recv);
  assert(sum_send == comm->total_send);

  comm->global_queue_size = 0;
  task_queue_size = comm->total_recv + q->next_size;
  MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1,
                MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);

  q->queue_size = task_queue_size;
  q->next_size = 0;
  q->send_size = 0;
}

// Count, once per destination rank, the ghost neighbors of vert_index
// (v_to_rank de-duplicates ranks within this vertex's neighborhood).
inline void update_sendcounts_thread(dist_graph_t* g, thread_comm_t* tc,
                                     uint64_t vert_index)
{
  for (int32_t i = 0; i < nprocs; ++i)
    tc->v_to_rank[i] = false;
  uint64_t out_degree = out_degree(g, vert_index);
  uint64_t* outs = out_vertices(g, vert_index);
  for (uint64_t j = 0; j < out_degree; ++j)
  {
    uint64_t out_index = outs[j];
    if (out_index >= g->n_local)
    {
      int32_t out_rank = g->ghost_tasks[out_index-g->n_local];
      if (!tc->v_to_rank[out_rank])
      {
        tc->v_to_rank[out_rank] = true;
        ++tc->sendcounts_thread[out_rank];
      }
    }
  }
}

// Count one send to the owning rank of the ghost vertex vert_index.
inline void update_sendcounts_thread_ghost(dist_graph_t* g, thread_comm_t* tc,
                                           uint64_t vert_index)
{
  int32_t vert_rank = g->ghost_tasks[vert_index-g->n_local];
  ++tc->sendcounts_thread[vert_rank];
}

// Stage (vert_index's global id, data) for each distinct rank owning a
// ghost neighbor of vert_index.
inline void update_vid_data_queues(dist_graph_t* g, thread_comm_t* tc,
                                   mpi_data_t* comm, uint64_t vert_index,
                                   int32_t data)
{
  for (int32_t i = 0; i < nprocs; ++i)
    tc->v_to_rank[i] = false;
  uint64_t out_degree = out_degree(g, vert_index);
  uint64_t* outs = out_vertices(g, vert_index);
  for (uint64_t j = 0; j < out_degree; ++j)
  {
    uint64_t out_index = outs[j];
    if (out_index >= g->n_local)
    {
      int32_t out_rank = g->ghost_tasks[out_index - g->n_local];
      if (!tc->v_to_rank[out_rank])
      {
        tc->v_to_rank[out_rank] = true;
        add_vid_data_to_send(tc, comm, g->local_unmap[vert_index],
                             data, out_rank);
      }
    }
  }
}

// Stage (ghost's global id, data) for the ghost vertex's owning rank.
inline void update_vid_data_queues_ghost(dist_graph_t* g, thread_comm_t* tc,
                                         mpi_data_t* comm, uint64_t vert_index,
                                         int32_t data)
{
  int32_t vert_rank = g->ghost_tasks[vert_index - g->n_local];
  add_vid_data_to_send(tc, comm, g->ghost_unmap[vert_index - g->n_local],
                       data, vert_rank);
}

// Append one vertex to the thread-local queue; when full, reserve a slot
// range in the shared queue_next with an atomic capture and flush.
inline void add_vid_to_queue(thread_queue_t* tq, queue_data_t* q,
                             uint64_t vertex_id)
{
  tq->thread_queue[tq->thread_queue_size++] = vertex_id;

  if (tq->thread_queue_size == THREAD_QUEUE_SIZE)
  {
    uint64_t start_offset;

#pragma omp atomic capture
    start_offset = q->next_size += THREAD_QUEUE_SIZE;

    start_offset -= THREAD_QUEUE_SIZE;
    for (uint64_t i = 0; i < THREAD_QUEUE_SIZE; ++i)
      q->queue_next[start_offset + i] = tq->thread_queue[i];
    tq->thread_queue_size = 0;
  }
}

// Same as add_vid_to_queue() but appends a pair of vertices at once.
inline void add_vids_to_queue(thread_queue_t* tq, queue_data_t* q,
                              uint64_t vertex_id1, uint64_t vertex_id2)
{
  tq->thread_queue[tq->thread_queue_size++] = vertex_id1;
  tq->thread_queue[tq->thread_queue_size++] = vertex_id2;

  if (tq->thread_queue_size == THREAD_QUEUE_SIZE)
  {
    uint64_t start_offset;

#pragma omp atomic capture
    start_offset = q->next_size += THREAD_QUEUE_SIZE;

    start_offset -= THREAD_QUEUE_SIZE;
    for (uint64_t i = 0; i < THREAD_QUEUE_SIZE; ++i)
      q->queue_next[start_offset + i] = tq->thread_queue[i];
    tq->thread_queue_size = 0;
  }
}

// Flush whatever remains in the thread-local queue into queue_next.
inline void empty_queue(thread_queue_t* tq, queue_data_t* q)
{
  uint64_t start_offset;

#pragma omp atomic capture
  start_offset = q->next_size += tq->thread_queue_size;

  start_offset -= tq->thread_queue_size;
  for (uint64_t i = 0; i < tq->thread_queue_size; ++i)
    q->queue_next[start_offset + i] = tq->thread_queue[i];
  tq->thread_queue_size = 0;
}

// Append one vertex to the thread-local send buffer; flush to the shared
// queue_send when full (atomic-capture slot reservation, as above).
inline void add_vid_to_send(thread_queue_t* tq, queue_data_t* q,
                            uint64_t vertex_id)
{
  tq->thread_send[tq->thread_send_size++] = vertex_id;

  if (tq->thread_send_size == THREAD_QUEUE_SIZE)
  {
    uint64_t start_offset;

#pragma omp atomic capture
    start_offset = q->send_size += tq->thread_send_size;

    start_offset -= tq->thread_send_size;
    for (uint64_t i = 0; i < THREAD_QUEUE_SIZE; ++i)
      q->queue_send[start_offset + i] = tq->thread_send[i];
    tq->thread_send_size = 0;
  }
}

// Flush whatever remains in the thread-local send buffer into queue_send.
inline void empty_send(thread_queue_t* tq, queue_data_t* q)
{
  uint64_t start_offset;

#pragma omp atomic capture
  start_offset = q->send_size += tq->thread_send_size;

  start_offset -= tq->thread_send_size;
  for (uint64_t i = 0; i < tq->thread_send_size; ++i)
    q->queue_send[start_offset + i] = tq->thread_send[i];
  tq->thread_send_size = 0;
}

// Stage one (vertex, data, rank) triple; when the thread buffer fills,
// reserve per-rank ranges in the shared send buffers via atomic captures
// on sdispls_temp and scatter the staged entries by destination rank.
inline void add_vid_data_to_send(thread_comm_t* tc, mpi_data_t* comm,
                                 uint64_t vertex_id, int32_t data_val,
                                 int32_t send_rank)
{
  tc->sendbuf_vert_thread[tc->thread_queue_size] = vertex_id;
  tc->sendbuf_data_thread[tc->thread_queue_size] = data_val;
  tc->sendbuf_rank_thread[tc->thread_queue_size] = send_rank;
  ++tc->thread_queue_size;
  ++tc->sendcounts_thread[send_rank];

  if (tc->thread_queue_size == THREAD_QUEUE_SIZE)
  {
    for (int32_t i = 0; i < nprocs; ++i)
    {
#pragma omp atomic capture
      tc->thread_starts[i] = comm->sdispls_temp[i] += tc->sendcounts_thread[i];

      tc->thread_starts[i] -= tc->sendcounts_thread[i];
    }

    for (uint64_t i = 0; i < tc->thread_queue_size; ++i)
    {
      int32_t cur_rank = tc->sendbuf_rank_thread[i];
      comm->sendbuf_vert[tc->thread_starts[cur_rank]] =
        tc->sendbuf_vert_thread[i];
      comm->sendbuf_data[tc->thread_starts[cur_rank]] =
        tc->sendbuf_data_thread[i];
      ++tc->thread_starts[cur_rank];
    }

    for (int32_t i = 0; i < nprocs; ++i)
    {
      tc->thread_starts[i] = 0;
      tc->sendcounts_thread[i] = 0;
    }
    tc->thread_queue_size = 0;
  }
}

// Flush whatever remains in the thread-local vid/data staging buffers.
inline void empty_vid_data(thread_comm_t* tc, mpi_data_t* comm)
{
  for (int32_t i = 0; i < nprocs; ++i)
  {
#pragma omp atomic capture
    tc->thread_starts[i] = comm->sdispls_temp[i] += tc->sendcounts_thread[i];

    tc->thread_starts[i] -= tc->sendcounts_thread[i];
  }

  for (uint64_t i = 0; i < tc->thread_queue_size; ++i)
  {
    int32_t cur_rank = tc->sendbuf_rank_thread[i];
    comm->sendbuf_vert[tc->thread_starts[cur_rank]] =
      tc->sendbuf_vert_thread[i];
    comm->sendbuf_data[tc->thread_starts[cur_rank]] =
      tc->sendbuf_data_thread[i];
    ++tc->thread_starts[cur_rank];
  }

  for (int32_t i = 0; i < nprocs; ++i)
  {
    tc->thread_starts[i] = 0;
    tc->sendcounts_thread[i] = 0;
  }
  tc->thread_queue_size = 0;
}

#endif
sum_openmp.c
//sum.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>

#define N_RUNS 10000
#define N 1200000

// read timer in second
double read_timer() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time + (double) tm.millitm / 1000.0;
}

// Fill X[0..N-1] with random floats in [0, 10).
void init(float *X) {
    for (int i = 0; i < N; i++) {
        X[i] = (float)rand() / (float)(RAND_MAX / 10.0);
    }
}

// SIMD-reduced sum of the N elements of X.
float sum(float *X) {
    float result = 0;
#pragma omp simd reduction(+:result)
    for (int i = 0; i < N; i++) {
        result += X[i];
    }
    return result;
}

int main(int argc, char **argv) {
    // Set everything up.
    float *X = malloc(sizeof(float) * N);
    if (X == NULL) {                      // BUG FIX: malloc was unchecked
        fprintf(stderr, "allocation failed\n");
        return 1;
    }
    float result = 0.0f;
    srand(time(NULL));
    init(X);

    double start = read_timer();
    for (int i = 0; i < N_RUNS; i++)
        result = sum(X);
    double t = (read_timer() - start);

    // BUG FIX: each run performs ~N additions, so the total flop count is
    // N * N_RUNS; the old formula ((2.0*N)*N*N_RUNS) counted ~2*N^2 flops
    // per run and overstated the rate by many orders of magnitude.
    double gflops = ((double) N * N_RUNS) / (1.0e9 * t);

    printf("==================================================================\n");
    printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------\n");
    printf("Sum (SIMD):\t\t%4f\t%4f\n", t, gflops);
    // Print the result so the compiler cannot discard the timed computation
    // as dead code (it was previously computed but never used).
    printf("Checksum: %f\n", result);

    free(X);
    return 0;
}
omp_test1.c
// Tiny OpenMP demo: distribute four loop iterations across the thread team
// and report which thread executed each one.
#include <stdio.h>
#include <omp.h>

int main(int argc, char *argv[])
{
    int j = 0;

    // Combined parallel-for directive; the loop variable is implicitly
    // private to each thread, exactly as with the split
    // "parallel" + "for" directives of the original.
    #pragma omp parallel for
    for (j = 0; j < 4; j++) {
        printf("j = %d, ThreadId = %d\n", j, omp_get_thread_num());
    }
    return 0;
}
GB_AxB_colscale_template.c
//------------------------------------------------------------------------------
// GB_AxB_colscale_template: C=A*D where D is a square diagonal matrix
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// This template is not used If C is iso, since all that is needed is to create
// C as a shallow-copy of the pattern of A.

// A and C can be jumbled.  D cannot, but it is a diagonal matrix so it is
// never jumbled.

// NOTE(review): this is a macro-driven template — GB_ATYPE, GB_GETA, GB_GETB,
// GB_BINOP, GB_CX, etc. are defined by each including kernel; the types and
// operator are only known at that point.

{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (GB_JUMBLED_OK (C)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (!GB_JUMBLED (D)) ;
    ASSERT (!C->iso) ;

    //--------------------------------------------------------------------------
    // get C, A, and D
    //--------------------------------------------------------------------------

    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ah = A->h ;
    #if !GB_A_IS_PATTERN
    const GB_ATYPE *restrict Ax = (GB_ATYPE *) A->x ;
    #endif
    #if !GB_B_IS_PATTERN
    const GB_BTYPE *restrict Dx = (GB_BTYPE *) D->x ;
    #endif
    const int64_t avlen = A->vlen ;
    const bool A_iso = A->iso ;
    const bool D_iso = D->iso ;

    // A_ek_slicing packs three arrays of length A_ntasks back to back:
    // first-k, last-k, and the pA start offsets, one entry per task.
    const int64_t *restrict kfirst_Aslice = A_ek_slicing ;
    const int64_t *restrict klast_Aslice  = A_ek_slicing + A_ntasks ;
    const int64_t *restrict pstart_Aslice = A_ek_slicing + A_ntasks * 2 ;

    //--------------------------------------------------------------------------
    // C=A*D
    //--------------------------------------------------------------------------

    int tid ;
    #pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < A_ntasks ; tid++)
    {

        // if kfirst > klast then task tid does no work at all
        int64_t kfirst = kfirst_Aslice [tid] ;
        int64_t klast  = klast_Aslice  [tid] ;

        //----------------------------------------------------------------------
        // C(:,kfirst:klast) = A(:,kfirst:klast)*D(kfirst:klast,kfirst:klast)
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // find the part of A(:,k) and C(:,k) to be operated on by this task
            //------------------------------------------------------------------

            int64_t j = GBH (Ah, k) ;
            int64_t pA_start, pA_end ;
            GB_get_pA (&pA_start, &pA_end, tid, k,
                kfirst, klast, pstart_Aslice, Ap, avlen) ;

            //------------------------------------------------------------------
            // C(:,j) = A(:,j)*D(j,j)
            //------------------------------------------------------------------

            GB_GETB (djj, Dx, j, D_iso) ;           // djj = D (j,j)
            GB_PRAGMA_SIMD_VECTORIZE
            for (int64_t p = pA_start ; p < pA_end ; p++)
            { 
                GB_GETA (aij, Ax, p, A_iso) ;       // aij = A(i,j)
                GB_BINOP (GB_CX (p), aij, djj, 0, 0) ;  // C(i,j) = aij * djj
            }
        }
    }
}
Example_private.2.c
/* * @@name: private.2c * @@type: C * @@compilable: yes * @@linkable: no * @@expect: success */ int a; void g(int k) { a = k; /* Accessed in the region but outside of the construct; * therefore unspecified whether original or private list * item is modified. */ } void f(int n) { int a = 0; #pragma omp parallel for private(a) for (int i=1; i<n; i++) { a = i; g(a*2); /* Private copy of "a" */ } }
GrB_BinaryOp_wait.c
//------------------------------------------------------------------------------ // GrB_BinaryOp_wait: wait for a user-defined GrB_BinaryOp to complete //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // In SuiteSparse:GraphBLAS, a user-defined GrB_BinaryOp has no pending // operations to wait for. All this method does is verify that the op is // properly initialized, and then it does an OpenMP flush. #include "GB.h" GrB_Info GrB_BinaryOp_wait // no work, just check if the GrB_BinaryOp is valid ( GrB_BinaryOp *op ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- #pragma omp flush GB_WHERE1 ("GrB_BinaryOp_wait (&op)") ; GB_RETURN_IF_NULL (op) ; GB_RETURN_IF_NULL_OR_FAULTY (*op) ; //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- #pragma omp flush return (GrB_SUCCESS) ; }
ccl_correlation.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <gsl/gsl_integration.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_roots.h> #include <gsl/gsl_spline.h> #include <gsl/gsl_sf_bessel.h> #include <gsl/gsl_sf_legendre.h> #include "ccl.h" /*--------ROUTINE: taper_cl ------ TASK:n Apply cosine tapering to Cls to reduce aliasing INPUT: number of ell bins for Cl, ell vector, C_ell vector, limits for tapering e.g., ell_limits=[low_ell_limit_lower,low_ell_limit_upper,high_ell_limit_lower,high_ell_limit_upper] */ static int taper_cl(int n_ell,double *ell,double *cl, double *ell_limits) { for(int i=0;i<n_ell;i++) { if(ell[i]<ell_limits[0] || ell[i]>ell_limits[3]) { cl[i]=0;//ell outside desirable range continue; } if(ell[i]>=ell_limits[1] && ell[i]<=ell_limits[2]) continue;//ell within good ell range if(ell[i]<ell_limits[1])//tapering low ell cl[i]*=cos((ell[i]-ell_limits[1])/(ell_limits[1]-ell_limits[0])*M_PI/2.); if(ell[i]>ell_limits[2])//tapering high ell cl[i]*=cos((ell[i]-ell_limits[2])/(ell_limits[3]-ell_limits[2])*M_PI/2.); } return 0; } /*--------ROUTINE: ccl_tracer_corr_fftlog ------ TASK: For a given tracer, get the correlation function Following function takes a function to calculate angular cl as well. 
By default above function will call it using ccl_angular_cl INPUT: type of tracer, number of theta values to evaluate = NL, theta vector */ static void ccl_tracer_corr_fftlog(ccl_cosmology *cosmo, int n_ell,double *ell,double *cls, int n_theta,double *theta,double *wtheta, int corr_type,int do_taper_cl,double *taper_cl_limits, int *status) { int i; double *l_arr,*cl_arr,*th_arr,*wth_arr; l_arr=ccl_log_spacing(cosmo->spline_params.ELL_MIN_CORR,cosmo->spline_params.ELL_MAX_CORR,cosmo->spline_params.N_ELL_CORR); if(l_arr==NULL) { *status=CCL_ERROR_LINSPACE; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_tracer_corr_fftlog ran out of memory\n"); return; } cl_arr=malloc(cosmo->spline_params.N_ELL_CORR*sizeof(double)); if(cl_arr==NULL) { free(l_arr); *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_tracer_corr_fftlog ran out of memory\n"); return; } //Interpolate input Cl into array needed for FFTLog ccl_f1d_t *cl_spl=ccl_f1d_t_new(n_ell,ell,cls,cls[0],0, ccl_f1d_extrap_const, ccl_f1d_extrap_logx_logy, status); if (*status) { free(l_arr); free(cl_arr); ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_f1d_t_new " "failed to create spline\n"); if (cl_spl) ccl_f1d_t_free(cl_spl); return; } if(cl_spl==NULL) { free(l_arr); free(cl_arr); *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_tracer_corr_fftlog " "ran out of memory\n"); return; } for(i=0;i<cosmo->spline_params.N_ELL_CORR;i++) cl_arr[i]=ccl_f1d_t_eval(cl_spl,l_arr[i]); ccl_f1d_t_free(cl_spl); if (do_taper_cl) taper_cl(cosmo->spline_params.N_ELL_CORR,l_arr,cl_arr,taper_cl_limits); th_arr=malloc(sizeof(double)*cosmo->spline_params.N_ELL_CORR); if(th_arr==NULL) { free(l_arr); free(cl_arr); *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_tracer_corr_fftlog ran out of memory\n"); return; } wth_arr=(double *)malloc(sizeof(double)*cosmo->spline_params.N_ELL_CORR); 
if(wth_arr==NULL) { free(l_arr); free(cl_arr); free(th_arr); *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_tracer_corr_fftlog ran out of memory\n"); return; } for(i=0;i<cosmo->spline_params.N_ELL_CORR;i++) th_arr[i]=0; //Although set here to 0, theta is modified by FFTlog to obtain the correlation at ~1/l int i_bessel=0; if(corr_type==CCL_CORR_GG) i_bessel=0; if(corr_type==CCL_CORR_GL) i_bessel=2; if(corr_type==CCL_CORR_LP) i_bessel=0; if(corr_type==CCL_CORR_LM) i_bessel=4; ccl_fftlog_ComputeXi2D(i_bessel,0, 1, cosmo->spline_params.N_ELL_CORR,l_arr,&cl_arr, th_arr,&wth_arr, status); // Interpolate to output values of theta ccl_f1d_t *wth_spl=ccl_f1d_t_new(cosmo->spline_params.N_ELL_CORR,th_arr, wth_arr,wth_arr[0],0, ccl_f1d_extrap_const, ccl_f1d_extrap_const, status); if (wth_spl == NULL) { free(l_arr); free(cl_arr); free(th_arr); free(wth_arr); *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_tracer_corr_fftlog ran out of memory\n"); return; } for(i=0;i<n_theta;i++) wtheta[i]=ccl_f1d_t_eval(wth_spl,theta[i]*M_PI/180.); ccl_f1d_t_free(wth_spl); free(l_arr); free(cl_arr); free(th_arr); free(wth_arr); return; } typedef struct { ccl_f1d_t *cl_spl; int i_bessel; double th; } corr_int_par; static double corr_bessel_integrand(double l,void *params) { double cl,jbes; corr_int_par *p=(corr_int_par *)params; double x=l*p->th; cl=ccl_f1d_t_eval(p->cl_spl,l); jbes=gsl_sf_bessel_Jn(p->i_bessel,x); return l*jbes*cl; } static void ccl_tracer_corr_bessel(ccl_cosmology *cosmo, int n_ell,double *ell,double *cls, int n_theta,double *theta,double *wtheta, int corr_type,int *status) { corr_int_par cp; ccl_f1d_t *cl_spl = NULL; cl_spl = ccl_f1d_t_new(n_ell, ell, cls, cls[0], 0, ccl_f1d_extrap_const, ccl_f1d_extrap_logx_logy, status); if(cl_spl == NULL) { *status = CCL_ERROR_MEMORY; ccl_cosmology_set_status_message( cosmo, "ccl_correlation.c: ccl_tracer_corr_bessel ran out of memory\n"); return; } int 
ith, gslstatus; double result,eresult; gsl_function F; gsl_integration_workspace *w = NULL; int local_status; #pragma omp parallel default(none) \ shared(cosmo, status, wtheta, n_ell, ell, cls, \ corr_type, cl_spl, theta, n_theta) \ private(w, F, result, eresult, local_status, ith, \ gslstatus, cp) { local_status = *status; switch(corr_type) { case CCL_CORR_GG: cp.i_bessel = 0; break; case CCL_CORR_GL: cp.i_bessel = 2; break; case CCL_CORR_LP: cp.i_bessel = 0; break; case CCL_CORR_LM: cp.i_bessel = 4; break; } cp.cl_spl = cl_spl; w = gsl_integration_workspace_alloc(cosmo->gsl_params.N_ITERATION); if (w == NULL) { local_status = CCL_ERROR_MEMORY; } F.function = &corr_bessel_integrand; F.params = &cp; #pragma omp for schedule(dynamic) for(ith=0; ith < n_theta; ith++) { if (local_status == 0) { cp.th = theta[ith]*M_PI/180; //TODO: Split into intervals between first bessel zeros before integrating //This will help both speed and accuracy of the integral. gslstatus = gsl_integration_qag(&F, 0, cosmo->spline_params.ELL_MAX_CORR, 0, cosmo->gsl_params.INTEGRATION_EPSREL, cosmo->gsl_params.N_ITERATION, cosmo->gsl_params.INTEGRATION_GAUSS_KRONROD_POINTS, w, &result, &eresult); if(gslstatus != GSL_SUCCESS) { ccl_raise_gsl_warning(gslstatus, "ccl_correlation.c: ccl_tracer_corr_bessel():"); local_status |= gslstatus; } wtheta[ith] = result/(2*M_PI); } } if (local_status) { #pragma omp atomic write *status = local_status; } gsl_integration_workspace_free(w); } ccl_f1d_t_free(cl_spl); } /*--------ROUTINE: ccl_compute_legendre_polynomial ------ TASK: Compute input factor for ccl_tracer_corr_legendre INPUT: tracer 1, tracer 2, i_bessel, theta array, n_theta, L_max, output Pl_theta */ static void ccl_compute_legendre_polynomial(int corr_type,double theta,int ell_max,double *Pl_theta) { int i,j; double k=0; double cth=cos(theta*M_PI/180); //Initialize Pl_theta for (j=0;j<=ell_max;j++) Pl_theta[j]=0.; if(corr_type==CCL_CORR_GG) { gsl_sf_legendre_Pl_array(ell_max,cth,Pl_theta); for 
(j=0;j<=ell_max;j++) Pl_theta[j]*=(2*j+1); } else if(corr_type==CCL_CORR_GL) { for (j=2;j<=ell_max;j++) {//https://arxiv.org/pdf/1007.4809.pdf Pl_theta[j]=gsl_sf_legendre_Plm(j,2,cth); Pl_theta[j]*=(2*j+1.)/((j+0.)*(j+1.)); } } } /*--------ROUTINE: ccl_tracer_corr_legendre ------ TASK: Compute correlation function via Legendre polynomials INPUT: cosmology, number of theta bins, theta array, tracer 1, tracer 2, i_bessel, boolean for tapering, vector of tapering limits, correlation vector, angular_cl function. */ static void ccl_tracer_corr_legendre(ccl_cosmology *cosmo, int n_ell,double *ell,double *cls, int n_theta,double *theta,double *wtheta, int corr_type,int do_taper_cl,double *taper_cl_limits, int *status) { int i; double *l_arr = NULL, *cl_arr = NULL, *Pl_theta = NULL; ccl_f1d_t *cl_spl; if(corr_type==CCL_CORR_LM || corr_type==CCL_CORR_LP){ *status=CCL_ERROR_NOT_IMPLEMENTED; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: CCL does not support full-sky xi+- calcuations.\nhttps://arxiv.org/abs/1702.05301 indicates flat-sky to be sufficient.\n"); } if(*status==0) { l_arr=malloc(((int)(cosmo->spline_params.ELL_MAX_CORR)+1)*sizeof(double)); if(l_arr==NULL) { *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_tracer_corr_legendre ran out of memory\n"); } } if(*status==0) { cl_arr=malloc(((int)(cosmo->spline_params.ELL_MAX_CORR)+1)*sizeof(double)); if(cl_arr==NULL) { *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_tracer_corr_legendre ran out of memory\n"); } } if(*status==0) { //Interpolate input Cl into cl_spl=ccl_f1d_t_new(n_ell,ell,cls,cls[0],0, ccl_f1d_extrap_const, ccl_f1d_extrap_logx_logy, status); if(cl_spl==NULL) { *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_tracer_corr_legendre ran out of memory\n"); } } if(*status==0) { for(i=0;i<=(int)(cosmo->spline_params.ELL_MAX_CORR);i++) { double l=(double)i; l_arr[i]=l; 
cl_arr[i]=ccl_f1d_t_eval(cl_spl,l); } ccl_f1d_t_free(cl_spl); if (do_taper_cl) *status=taper_cl((int)(cosmo->spline_params.ELL_MAX_CORR)+1,l_arr,cl_arr,taper_cl_limits); } int local_status, i_L; #pragma omp parallel default(none) \ shared(cosmo, theta, cl_arr, wtheta, n_theta, status, corr_type) \ private(Pl_theta, i, i_L, local_status) { Pl_theta = NULL; local_status = *status; if (local_status == 0) { Pl_theta = malloc(sizeof(double)*((int)(cosmo->spline_params.ELL_MAX_CORR)+1)); if (Pl_theta == NULL) { local_status = CCL_ERROR_MEMORY; } } #pragma omp for schedule(dynamic) for (int i=0; i < n_theta; i++) { if (local_status == 0) { wtheta[i] = 0; ccl_compute_legendre_polynomial(corr_type, theta[i], (int)(cosmo->spline_params.ELL_MAX_CORR), Pl_theta); for (i_L=1; i_L < (int)(cosmo->spline_params.ELL_MAX_CORR); i_L+=1) wtheta[i] += cl_arr[i_L]*Pl_theta[i_L]; wtheta[i] /= (M_PI*4); } } if (local_status) { #pragma omp atomic write *status = local_status; } free(Pl_theta); } free(l_arr); free(cl_arr); } /*--------ROUTINE: ccl_tracer_corr ------ TASK: For a given tracer, get the correlation function. Do so by running ccl_angular_cls. If you already have Cls calculated, go to the next function to pass them directly. INPUT: cosmology, number of theta values to evaluate = NL, theta vector, tracer 1, tracer 2, i_bessel, key for tapering, limits of tapering correlation function. 
*/ void ccl_correlation(ccl_cosmology *cosmo, int n_ell,double *ell,double *cls, int n_theta,double *theta,double *wtheta, int corr_type,int do_taper_cl,double *taper_cl_limits,int flag_method, int *status) { switch(flag_method) { case CCL_CORR_FFTLOG : ccl_tracer_corr_fftlog(cosmo,n_ell,ell,cls,n_theta,theta,wtheta,corr_type, do_taper_cl,taper_cl_limits,status); break; case CCL_CORR_LGNDRE : ccl_tracer_corr_legendre(cosmo,n_ell,ell,cls,n_theta,theta,wtheta,corr_type, do_taper_cl,taper_cl_limits,status); break; case CCL_CORR_BESSEL : ccl_tracer_corr_bessel(cosmo,n_ell,ell,cls,n_theta,theta,wtheta,corr_type,status); break; default : *status=CCL_ERROR_INCONSISTENT; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation. Unknown algorithm\n"); } } /*--------ROUTINE: ccl_correlation_3d ------ TASK: Calculate the 3d-correlation function. Do so by using FFTLog. INPUT: cosmology, scale factor a, number of r values, r values, key for tapering, limits of tapering Correlation function result will be in array xi */ void ccl_correlation_3d(ccl_cosmology *cosmo, double a, int n_r,double *r,double *xi, int do_taper_pk,double *taper_pk_limits, int *status) { int i,N_ARR; double *k_arr,*pk_arr,*r_arr,*xi_arr; if (!cosmo->computed_nonlin_power) { *status = CCL_ERROR_NONLIN_POWER_INIT; ccl_cosmology_set_status_message( cosmo, "ccl_correlation.c: ccl_correlation_3d(): non-linear power spctrum has not been computed!"); return; } //number of data points for k and pk array N_ARR=(int)(cosmo->spline_params.N_K_3DCOR*log10(cosmo->spline_params.K_MAX/cosmo->spline_params.K_MIN)); k_arr=ccl_log_spacing(cosmo->spline_params.K_MIN,cosmo->spline_params.K_MAX,N_ARR); if(k_arr==NULL) { *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_3d ran out of memory\n"); return; } pk_arr=malloc(N_ARR*sizeof(double)); if(pk_arr==NULL) { free(k_arr); *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: 
ccl_correlation_3d ran out of memory\n"); return; } for (i=0; i<N_ARR; i++){ pk_arr[i] = ccl_nonlin_matter_power(cosmo, k_arr[i], a, status); } if (do_taper_pk) taper_cl(N_ARR,k_arr,pk_arr,taper_pk_limits); r_arr=malloc(sizeof(double)*N_ARR); if(r_arr==NULL) { free(k_arr); free(pk_arr); *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_3d ran out of memory\n"); return; } xi_arr=malloc(sizeof(double)*N_ARR); if(xi_arr==NULL) { free(k_arr); free(pk_arr); free(r_arr); *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_3d ran out of memory\n"); return; } for(i=0;i<N_ARR;i++) r_arr[i]=0; ccl_fftlog_ComputeXi3D(0, 0, 1, N_ARR, k_arr, &pk_arr, r_arr, &xi_arr, status); // Interpolate to output values of r ccl_f1d_t *xi_spl=ccl_f1d_t_new(N_ARR,r_arr,xi_arr,xi_arr[0],0, ccl_f1d_extrap_const, ccl_f1d_extrap_const, status); if (xi_spl == NULL) { free(k_arr); free(pk_arr); free(r_arr); free(xi_arr); *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_3d ran out of memory\n"); return; } for(i=0;i<n_r;i++) xi[i]=ccl_f1d_t_eval(xi_spl,r[i]); ccl_f1d_t_free(xi_spl); free(k_arr); free(pk_arr); free(r_arr); free(xi_arr); return; } /*--------ROUTINE: ccl_correlation_multipole ------ TASK: Calculate multipole of the redshift space correlation function. Do so using FFTLog. 
INPUT: cosmology, scale factor a, beta (= growth rate / bias), multipole order l = 0, 2, or 4, number of s values, s values Multipole function result will be in array xi */ void ccl_correlation_multipole(ccl_cosmology *cosmo, double a, double beta, int l, int n_s, double *s, double *xi, int *status) { int i, N_ARR; double *k_arr, *pk_arr, *s_arr, *xi_arr, *xi_arr0; if (!cosmo->computed_nonlin_power) { *status = CCL_ERROR_NONLIN_POWER_INIT; ccl_cosmology_set_status_message( cosmo, "ccl_correlation.c: ccl_correlation_multipole(): non-linear power spctrum has not been computed!"); return; } N_ARR = (int)(cosmo->spline_params.N_K_3DCOR * log10(cosmo->spline_params.K_MAX / cosmo->spline_params.K_MIN)); k_arr = ccl_log_spacing(cosmo->spline_params.K_MIN, cosmo->spline_params.K_MAX, N_ARR); if (k_arr == NULL) { *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole ran out of memory\n"); return; } pk_arr = malloc(N_ARR * sizeof(double)); if (pk_arr == NULL) { free(k_arr); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole ran out of memory\n"); return; } for (i = 0; i < N_ARR; i++) pk_arr[i] = ccl_nonlin_matter_power(cosmo, k_arr[i], a, status); s_arr = malloc(sizeof(double) * N_ARR); if (s_arr == NULL) { free(k_arr); free(pk_arr); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole ran out of memory\n"); return; } xi_arr = malloc(sizeof(double) * N_ARR); if (xi_arr == NULL) { free(k_arr); free(pk_arr); free(s_arr); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole ran out of memory\n"); return; } xi_arr0 = malloc(sizeof(double) * N_ARR); if (xi_arr0 == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole ran out of memory\n"); return; } for (i = 0; i < 
N_ARR; i++) s_arr[i] = 0; // Calculate multipoles if (l == 0) { ccl_fftlog_ComputeXi3D(0, 0, 1, N_ARR, k_arr, &pk_arr, s_arr, &xi_arr0, status); for (i = 0; i < N_ARR; i++) xi_arr[i] = (1. + 2. / 3 * beta + 1. / 5 * beta * beta) * xi_arr0[i]; } else if (l == 2) { ccl_fftlog_ComputeXi3D(2, 0, 1, N_ARR, k_arr, &pk_arr, s_arr, &xi_arr0, status); for (i = 0; i < N_ARR; i++) xi_arr[i] = -(4. / 3 * beta + 4. / 7 * beta * beta) * xi_arr0[i]; } else if (l == 4) { ccl_fftlog_ComputeXi3D(4, 0, 1, N_ARR, k_arr, &pk_arr, s_arr, &xi_arr0, status); for (i = 0; i < N_ARR; i++) xi_arr[i] = 8. / 35 * beta * beta * xi_arr0[i]; } else { strcpy(cosmo->status_message, "unavailable value of l\n"); return; } // Interpolate to output values of s ccl_f1d_t *xi_spl = ccl_f1d_t_new(N_ARR, s_arr, xi_arr, xi_arr[0], 0, ccl_f1d_extrap_const, ccl_f1d_extrap_const, status); if (xi_spl == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole ran out of memory\n"); } for (i = 0; i < n_s; i++) xi[i] = ccl_f1d_t_eval(xi_spl,s[i]); ccl_f1d_t_free(xi_spl); free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); return; } /*--------ROUTINE: ccl_correlation_multipole_spline ------ TASK: Store multipoles of the redshift-space correlation in global splines INPUT: cosmology, scale factor a Result is stored in cosmo->data.rsd_splines[] */ void ccl_correlation_multipole_spline(ccl_cosmology *cosmo, double a, int *status) { int i, N_ARR; double *k_arr, *pk_arr, *s_arr, *xi_arr, *xi_arr0, *xi_arr2, *xi_arr4; if (!cosmo->computed_nonlin_power) { *status = CCL_ERROR_NONLIN_POWER_INIT; ccl_cosmology_set_status_message( cosmo, "ccl_correlation.c: ccl_correlation_multipole_spline(): non-linear power spctrum has not been computed!"); return; } N_ARR = (int)(cosmo->spline_params.N_K_3DCOR * log10(cosmo->spline_params.K_MAX / cosmo->spline_params.K_MIN)); k_arr = 
ccl_log_spacing(cosmo->spline_params.K_MIN, cosmo->spline_params.K_MAX, N_ARR); if (k_arr == NULL) { *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole_spline ran out of " "memory\n"); return; } pk_arr = malloc(N_ARR * sizeof(double)); if (pk_arr == NULL) { free(k_arr); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole_spline ran out of " "memory\n"); return; } for (i = 0; i < N_ARR; i++) pk_arr[i] = ccl_nonlin_matter_power(cosmo, k_arr[i], a, status); s_arr = malloc(sizeof(double) * N_ARR); if (s_arr == NULL) { free(k_arr); free(pk_arr); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole_spline ran out of " "memory\n"); return; } xi_arr = malloc(sizeof(double) * N_ARR); if (xi_arr == NULL) { free(k_arr); free(pk_arr); free(s_arr); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole_spline ran out of " "memory\n"); return; } xi_arr0 = malloc(sizeof(double) * N_ARR); if (xi_arr0 == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole_spline ran out of " "memory\n"); return; } xi_arr2 = malloc(sizeof(double) * N_ARR); if (xi_arr2 == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole_spline ran out of " "memory\n"); return; } xi_arr4 = malloc(sizeof(double) * N_ARR); if (xi_arr4 == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); free(xi_arr2); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole_spline ran out of " "memory\n"); return; } for (i = 0; i < N_ARR; i++) s_arr[i] = 0; // Calculate multipoles ccl_fftlog_ComputeXi3D(0, 0, 1, N_ARR, 
k_arr, &pk_arr, s_arr, &xi_arr0, status); ccl_fftlog_ComputeXi3D(2, 0, 1, N_ARR, k_arr, &pk_arr, s_arr, &xi_arr2, status); ccl_fftlog_ComputeXi3D(4, 0, 1, N_ARR, k_arr, &pk_arr, s_arr, &xi_arr4, status); // free any memory that may have been allocated ccl_f1d_t_free(cosmo->data.rsd_splines[0]); ccl_f1d_t_free(cosmo->data.rsd_splines[1]); ccl_f1d_t_free(cosmo->data.rsd_splines[2]); cosmo->data.rsd_splines[0] = NULL; cosmo->data.rsd_splines[1] = NULL; cosmo->data.rsd_splines[1] = NULL; // Interpolate to output values of s cosmo->data.rsd_splines[0] = ccl_f1d_t_new(N_ARR, s_arr, xi_arr0, xi_arr0[0], 0, ccl_f1d_extrap_const, ccl_f1d_extrap_const, status); if (cosmo->data.rsd_splines[0] == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); free(xi_arr2); free(xi_arr4); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole_spline ran out of " "memory\n"); return; } cosmo->data.rsd_splines[1] = ccl_f1d_t_new(N_ARR, s_arr, xi_arr2, xi_arr2[0], 0, ccl_f1d_extrap_const, ccl_f1d_extrap_const, status); if (cosmo->data.rsd_splines[1] == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); free(xi_arr2); free(xi_arr4); ccl_f1d_t_free(cosmo->data.rsd_splines[0]); cosmo->data.rsd_splines[0] = NULL; *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole_spline ran out of " "memory\n"); return; } cosmo->data.rsd_splines[2] = ccl_f1d_t_new(N_ARR, s_arr, xi_arr4, xi_arr4[0], 0, ccl_f1d_extrap_const, ccl_f1d_extrap_const, status); if (cosmo->data.rsd_splines[2] == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); free(xi_arr2); free(xi_arr4); ccl_f1d_t_free(cosmo->data.rsd_splines[0]); cosmo->data.rsd_splines[0] = NULL; ccl_f1d_t_free(cosmo->data.rsd_splines[1]); cosmo->data.rsd_splines[1] = NULL; *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole_spline ran 
out of " "memory\n"); return; } // set the scale factor cosmo->data.rsd_splines_scalefactor = a; free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); free(xi_arr2); free(xi_arr4); return; } /*--------ROUTINE: ccl_correlation_3dRsd ------ TASK: Calculate the redshift-space correlation function. INPUT: cosmology, scale factor a, number of s values, s values, mu = cosine of galaxy separation angle w.r.t. line of sight, beta (= growth rate / bias), key for using spline Correlation function result will be in array xi */ void ccl_correlation_3dRsd(ccl_cosmology *cosmo, double a, int n_s, double *s, double mu, double beta, double *xi, int use_spline, int *status) { int i; double *xi_arr0, *xi_arr2, *xi_arr4; if (!cosmo->computed_nonlin_power) { *status = CCL_ERROR_NONLIN_POWER_INIT; ccl_cosmology_set_status_message( cosmo, "ccl_correlation.c: ccl_correlation_3dRsd(): non-linear power spctrum has not been computed!"); return; } if (use_spline == 0) { xi_arr0 = malloc(sizeof(double) * n_s); if (xi_arr0 == NULL) { *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_3dRsd ran out of memory\n"); return; } xi_arr2 = malloc(sizeof(double) * n_s); if (xi_arr2 == NULL) { free(xi_arr0); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_3dRsd ran out of memory\n"); return; } xi_arr4 = malloc(sizeof(double) * n_s); if (xi_arr4 == NULL) { free(xi_arr0); free(xi_arr2); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_3dRsd ran out of memory\n"); return; } ccl_correlation_multipole(cosmo, a, beta, 0, n_s, s, xi_arr0, status); ccl_correlation_multipole(cosmo, a, beta, 2, n_s, s, xi_arr2, status); ccl_correlation_multipole(cosmo, a, beta, 4, n_s, s, xi_arr4, status); for (i = 0; i < n_s; i++) xi[i] = xi_arr0[i] + xi_arr2[i] * gsl_sf_legendre_Pl(2, mu) + xi_arr4[i] * gsl_sf_legendre_Pl(4, mu); free(xi_arr0); free(xi_arr2); free(xi_arr4); } else 
{ if ((cosmo->data.rsd_splines[0] == NULL) || (cosmo->data.rsd_splines[1] == NULL) || (cosmo->data.rsd_splines[2] == NULL) || (cosmo->data.rsd_splines_scalefactor != a)) ccl_correlation_multipole_spline(cosmo, a, status); for (i = 0; i < n_s; i++) xi[i] = (1. + 2. / 3 * beta + 1. / 5 * beta * beta) * ccl_f1d_t_eval(cosmo->data.rsd_splines[0],s[i]) - (4. / 3 * beta + 4. / 7 * beta * beta) * ccl_f1d_t_eval(cosmo->data.rsd_splines[1],s[i]) * gsl_sf_legendre_Pl(2, mu) + 8. / 35 * beta * beta * ccl_f1d_t_eval(cosmo->data.rsd_splines[2],s[i]) * gsl_sf_legendre_Pl(4, mu); } return; } /*--------ROUTINE: ccl_correlation_3dRsd_avgmu ------ TASK: Calculate the average of redshift-space correlation function xi(s,mu) over mu at constant s INPUT: cosmology, scale factor a, number of s values, s values, beta (= growth rate / bias) The result will be in array xi */ void ccl_correlation_3dRsd_avgmu(ccl_cosmology *cosmo, double a, int n_s, double *s, double beta, double *xi, int *status) { // The average is just the l=0 multipole - the higher multiples inetegrate to zero. ccl_correlation_multipole(cosmo, a, beta, 0, n_s, s, xi, status); return; } /*--------ROUTINE: ccl_correlation_pi_sigma ------ TASK: Calculate the redshift-space correlation function using longitudinal and transverse coordinates pi and sigma. 
INPUT: cosmology, scale factor a, beta (= growth rate / bias), pi, number of sigma values, sigma values, key for using spline Correlation function result will be in array xi */ void ccl_correlation_pi_sigma(ccl_cosmology *cosmo, double a, double beta, double pi, int n_sig, double *sig, double *xi, int use_spline, int *status) { int i; double *mu_arr, *s_arr, *xi_arr; if (!cosmo->computed_nonlin_power) { *status = CCL_ERROR_NONLIN_POWER_INIT; ccl_cosmology_set_status_message( cosmo, "ccl_correlation.c: ccl_correlation_pi_sigma(): non-linear power spctrum has not been computed!"); return; } mu_arr = malloc(sizeof(double) * n_sig); if (mu_arr == NULL) { *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_pi_sigma ran out of memory\n"); return; } s_arr = malloc(sizeof(double) * n_sig); if (s_arr == NULL) { free(mu_arr); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_pi_sigma ran out of memory\n"); return; } xi_arr = malloc(sizeof(double) * n_sig); if (xi_arr == NULL) { free(mu_arr); free(s_arr); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_pi_sigma ran out of memory\n"); return; } for (i = 0; i < n_sig; i++) { s_arr[i] = sqrt(pi * pi + sig[i] * sig[i]); mu_arr[i] = pi / s_arr[i]; } for (i = 0; i < n_sig; i++) { ccl_correlation_3dRsd(cosmo, a, n_sig, s_arr, mu_arr[i], beta, xi_arr, use_spline, status); xi[i] = xi_arr[i]; } free(mu_arr); free(xi_arr); free(s_arr); return; }
distribute_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify=expected,omp50 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify=expected,omp50 %s -Wuninitialized void xxx(int argc) { int x; // expected-note {{initialize the variable 'x' to silence this warning}} #pragma omp distribute simd for (int i = 0; i < 10; ++i) argc = x; // expected-warning {{variable 'x' is uninitialized when used here}} } // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute simd'}} #pragma omp distribute simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute simd'}} #pragma omp distribute simd foo // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute simd'}} #pragma omp distribute simd safelen(4) void test_no_clause(void) { int i; #pragma omp target #pragma omp teams #pragma omp distribute simd for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{statement after '#pragma omp distribute simd' must be a for loop}} #pragma omp distribute simd ++i; } void test_branch_protected_scope(void) { int i = 0; L1: ++i; int x[24]; #pragma omp target #pragma omp teams #pragma omp distribute simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause(void) { int i; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} #pragma omp distribute simd foo bar for (i = 0; i < 16; ++i) ; } void 
test_non_identifiers(void) { int i, x; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} #pragma omp distribute simd; for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} #pragma omp distribute simd private(x); for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} #pragma omp distribute simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(void); void test_safelen(void) { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute simd safelen for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd safelen() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute simd safelen 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma 
omp distribute simd safelen(4 for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // xxpected-error@+1 {{expected expression}} #pragma omp distribute simd safelen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd safelen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4, 8) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{integer constant expression}} #pragma omp distribute simd safelen(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{integer constant expression}} #pragma omp distribute simd safelen(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute simd safelen(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute simd safelen(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // 
expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute simd safelen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_simdlen(void) { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute simd simdlen for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd simdlen() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute simd simdlen 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4 for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd 
simdlen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd simdlen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4, 8) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{integer constant expression}} #pragma omp distribute simd simdlen(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{integer constant expression}} #pragma omp distribute simd simdlen(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute simd simdlen(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute simd simdlen(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute simd simdlen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_safelen_simdlen(void) { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp distribute simd simdlen(6) safelen(5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{the value of 'simdlen' parameter must 
be less than or equal to the value of the 'safelen' parameter}} #pragma omp distribute simd safelen(5) simdlen(6) for (i = 0; i < 16; ++i) ; } void test_collapse(void) { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute simd collapse for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd collapse( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd collapse() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd collapse(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd collapse(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute simd collapse 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma 
omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // xxpected-error@+1 {{expected expression}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams #pragma omp distribute simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+1 {{integer constant expression}} 
#pragma omp distribute simd collapse(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{integer constant expression}} #pragma omp distribute simd collapse(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute simd collapse(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute simd collapse(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; // expected-note@+3 2 {{defined as reduction}} #pragma omp target #pragma omp teams #pragma omp distribute simd collapse(2) reduction(+ : i) for (i = 0; i < 16; ++i) // expected-error {{loop iteration variable in the associated loop of 'omp distribute simd' directive may not be reduction, predetermined as lastprivate}} // expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}} for (int j = 0; j < 16; ++j) // expected-error@+2 2 {{reduction variable must be shared}} // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp for reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; #pragma omp target #pragma omp teams for (i = 0; i < 16; ++i) for (int j = 0; j < 16; ++j) #pragma omp distribute simd reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_linear(void) { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd linear( for (i = 
0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd linear(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd linear(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd linear() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd linear(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd linear(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute simd linear(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute simd linear(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute simd linear(x, y, z) for (i = 0; i < 16; ++i) ; } void test_aligned(void) { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected 
')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd aligned(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute simd aligned(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; int *x, y, z[25]; // expected-note 4 {{'y' defined here}} #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(z) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned(x :) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp 
distribute simd aligned(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(x : 2 * 2) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned(x : 1, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute simd aligned(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+2 {{defined as aligned}} // expected-error@+1 {{a variable cannot appear in more than one aligned clause}} #pragma omp distribute simd aligned(x) aligned(z, x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+3 {{defined as aligned}} // expected-error@+2 {{a variable cannot appear in more than one aligned clause}} // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute simd aligned(x, y, z) aligned(y, z) for (i = 0; i < 16; ++i) ; } void test_private(void) { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd private( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this 
'('}} // expected-error@+1 2 {{expected expression}} #pragma omp distribute simd private(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected expression}} #pragma omp distribute simd private(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd private() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd private(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target #pragma omp teams #pragma omp distribute simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_firstprivate(void) { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd firstprivate( for (i = 0; i < 16; ++i) ; } void test_lastprivate(void) { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd lastprivate( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp distribute simd lastprivate(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected expression}} #pragma omp distribute simd lastprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp 
target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd lastprivate() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd lastprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target #pragma omp teams #pragma omp distribute simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_reduction(void) { int i, x, y; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected identifier}} #pragma omp distribute simd reduction( : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp 
distribute simd reduction(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected expression}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction(+ for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+: for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+ :) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+ :, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+ : x, + : y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected identifier}} #pragma omp distribute simd reduction(% : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(+ : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(* : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(- : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(& : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(| : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(^ : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(&& : x) for (i = 0; i < 16; ++i) ; #pragma omp target 
#pragma omp teams #pragma omp distribute simd reduction(|| : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(max : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(min : x) for (i = 0; i < 16; ++i) ; struct X { int x; }; struct X X; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd reduction(+ : X.x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd reduction(+ : x + x) for (i = 0; i < 16; ++i) ; } void test_loop_messages(void) { float a[100], b[100], c[100]; #pragma omp target #pragma omp teams // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp distribute simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } #pragma omp target #pragma omp teams // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp distribute simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } } void linear_modifiers(int argc) { int k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(k) for (k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(val(k)) for (k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(uval(k)) // expected-error {{expected 'val' modifier}} for (k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(ref(k)) // expected-error {{expected 'val' modifier}} for (k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(foo(k)) // expected-error {{expected 'val' modifier}} for (k = 0; k < argc; ++k) ++k; } void test_nontemporal(void) { int i; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in 
directive '#pragma omp distribute simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd nontemporal( for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd nontemporal(, for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 2 {{expected expression}} #pragma omp distribute simd nontemporal(, ) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 {{expected expression}} #pragma omp distribute simd nontemporal() for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 {{expected expression}} #pragma omp distribute simd nontemporal(int) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} omp50-error@+1 {{expected variable name}} #pragma omp distribute simd nontemporal(0) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute simd nontemporal(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute simd nontemporal(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of 
undeclared identifier 'y'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute simd nontemporal(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd nontemporal(x :) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} #pragma omp distribute simd nontemporal(x :, ) for (i = 0; i < 16; ++i) ; // omp50-note@+2 {{defined as nontemporal}} // omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}} #pragma omp distribute simd nontemporal(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} #pragma omp distribute simd private(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} #pragma omp distribute simd nontemporal(x) private(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} #pragma omp distribute simd nontemporal(x, y : 0) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp 
distribute simd'}} #pragma omp distribute simd nontemporal(x) lastprivate(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} #pragma omp distribute simd lastprivate(x) nontemporal(x) for (i = 0; i < 16; ++i) ; #pragma omp distribute simd order // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute simd'}} expected-error {{expected '(' after 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp distribute simd order( // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp distribute simd order(none // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp distribute simd order(concurrent // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} for (int i = 0; i < 10; ++i) ; #pragma omp distribute simd order(concurrent) // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute simd'}} for (int i = 0; i < 10; ++i) ; }
GB_unaryop__minv_fp32_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_fp32_uint16
// op(A') function:  GB_tran__minv_fp32_uint16

// C type:   float
// A type:   uint16_t
// cast:     float cij = (float) aij
// unaryop:  cij = (1.0F)/aij

// type of the A matrix entries
#define GB_ATYPE \
    uint16_t

// type of the C matrix entries
#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: multiplicative inverse.  NOTE(review): aij == 0 yields
// IEEE-754 +Inf here, not an error -- presumably intentional for MINV in
// floating point; confirm against the GraphBLAS spec.
#define GB_OP(z, x) \
    z = (1.0F)/x ;

// casting from the A type (uint16_t) to the C type (float)
#define GB_CASTING(z, aij) \
    float z = (float) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)               \
{                                       \
    /* aij = Ax [pA] */                 \
    GB_GETA (aij, Ax, pA) ;             \
    /* Cx [pC] = op (cast (aij)) */     \
    GB_CASTING (z, aij) ;               \
    GB_OP (GB_CX (pC), z) ;             \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_FP32 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = 1/((float) aij) elementwise over the anz entries of Ax,
// in parallel.  Cx and Ax may be aliased (each iteration reads and writes
// only position p, so aliasing is safe).
GrB_Info GB_unop__minv_fp32_uint16
(
    float *Cx,          // Cx and Ax may be aliased
    uint16_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop body is supplied by GB_unaryop_transpose.c, which expands
// using the GB_* macros defined above (phase 2 of the 2-phase transpose).
GrB_Info GB_tran__minv_fp32_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
networking.c
#include "networking.h"

/* Initialize WinSock 2.2.  Aborts (via LFATAL) if WSAStartup fails. */
void WinSockStart() {
    WSADATA wsa;
    if (WSAStartup(MAKEWORD(2,2), &wsa) != 0) {
        LFATAL("WSAStartup", WSAGetLastError());
    }
}

/* Tear down WinSock; pairs with WinSockStart. */
void WinSockClose() {
    WSACleanup();
}

// http://www.binarytides.com/winsock-socket-programming-tutorial/
/*
 * Attempt a non-blocking TCP connect to IP:port and, if the connection
 * becomes writable within max_ping milliseconds, send `message` once.
 * Returns 0 on success, 1 on any failure.  The socket is always closed
 * before returning (all failure paths call closesocket).
 *
 * IP is expected in network byte order (it is assigned to s_addr directly);
 * port is in host byte order (htons applied below).
 */
int TrySend(ZL_ulong IP, ZL_ushort port, ZL_cstring message, ZL_ulong max_ping) {
    SOCKET s;
    s = socket(AF_INET, SOCK_STREAM, 0);
    if(s == INVALID_SOCKET) {
        ITOA_R(WSAGetLastError(), out);
        LDEBUG("Error in TrySend socket: %s", out);
        return 1;
    }
    /* Switch the socket to non-blocking mode so connect() returns
       immediately and we can bound the wait with select() below. */
    ZL_ulong nonblocking = 1;
    int ioctlsocket_result = ioctlsocket(s, FIONBIO, &nonblocking);
    if (ioctlsocket_result != 0) {
        LFATAL("TrySend - cannot change socket mode", 1);
    }
    struct sockaddr_in addr;
    addr.sin_addr.s_addr = IP;
    addr.sin_family = AF_INET;
    addr.sin_port = htons(port);
    LDEBUG("Trying %s...\n", inet_ntoa(addr.sin_addr));
    int connect_result = connect(s, (struct sockaddr *)&addr, sizeof(addr));
    if (connect_result < 0) {
        if (connect_result == SOCKET_ERROR) {
            int error = WSAGetLastError();
            /* WSAEWOULDBLOCK is the expected "connection in progress"
               result for a non-blocking connect; anything else is fatal
               for this target. */
            if (error != WSAEWOULDBLOCK) {
                ITOA_R(WSAGetLastError(), out);
                LDEBUG("Error in TrySend connect: %s", out);
                closesocket(s);
                return 1;
            }
        } else {
            LDEBUG("Error in TrySend connect: %d [NON-WSA]", connect_result);
            closesocket(s);
            return 1;
        }
    } else {
        /* NOTE(review): a non-blocking connect CAN legitimately succeed
           immediately (e.g. loopback); treating success as a fatal error
           here looks wrong -- confirm intent. */
        LFATAL("Error in TrySend - socket is blocking", 1);
    }
    /* Wait up to max_ping ms for the socket to become writable, i.e. for
       the connect to complete.  On Windows the first select() argument is
       ignored, so 0 is fine here (it would be s+1 on POSIX). */
    fd_set fdset;
    struct timeval tv;
    FD_ZERO(&fdset);
    FD_SET(s, &fdset);
    tv.tv_sec = max_ping / 1000;
    tv.tv_usec = (max_ping - (tv.tv_sec * 1000)) * 1000;
    int select_result = select(0, NULL, &fdset, NULL, &tv);
    if (select_result == 0) {
        /* timeout: host did not answer within max_ping ms */
        closesocket(s);
        return 1;
    } else if (select_result == SOCKET_ERROR) {
        ITOA_R(WSAGetLastError(), out);
        LDEBUG("Error in TrySend select: %s", out);
        closesocket(s);
        return 1;
    } else {
        // safe to send in 1 call as long as SETTINGS_MAX_VAR_LEN (1024) < SO_MAX_MSG_SIZE (8195)
        if (send(s, message, strlen(message), 0) == SOCKET_ERROR) {
            ITOA_R(WSAGetLastError(), out);
            LDEBUG("Error in TrySend send: %s", out);
            closesocket(s);
            return 1;
        }
    }
    LDEBUG("Sent to %s!\n", inet_ntoa(addr.sin_addr));
    closesocket(s);
    return 0;
}

// TODO: do not ping localhost
/*
 * Fire TrySend at every address in a subnet: `start` is the subnet base
 * (network byte order), `len` the number of host addresses to try.
 * Each candidate address is start + SWAP_UINT32(cur), i.e. the host-order
 * counter byte-swapped back into network order.
 *
 * NOTE(review): omp_set_num_threads(len) requests one thread per address;
 * for a /16 or larger that is tens of thousands of threads -- verify this
 * is capped somewhere upstream.
 */
void TryBroadcastNetwork(ZL_ulong start, ZL_ulong len, ZL_ushort port, ZL_cstring message, ZL_ulong max_ping) {
    LDEBUG("Starting scanning network %ldL, %ld elements\n", start, len);
#if DEBUG
    clock_t currentTime = clock();
#endif
    omp_set_dynamic(0);
    omp_set_num_threads(len);
#pragma omp parallel default(none) shared(start, port, message, max_ping, len)
    {
#pragma omp single
        {
            LDEBUG("Running %d threads\n", omp_get_num_threads());
        }
#pragma omp for
        for (ZL_ulong cur = 0; cur < len; ++cur) {
            TrySend(start + SWAP_UINT32(cur), port, message, max_ping);
        }
    }
    /* NOTE(review): currentTime is only declared under #if DEBUG; this
       line presumably compiles to nothing when LDEBUG is disabled --
       confirm LDEBUG discards its arguments in release builds. */
    LDEBUG("TryBroadcastNetwork took %ld to complete\n", clock() - currentTime);
}

/*
 * Enumerate every local adapter/subnet via GetAdaptersInfo and broadcast
 * `message` to each usable one by delegating to TryBroadcastNetwork.
 * Adapters with no address (0), an invalid address (ULONG_MAX), or a
 * /32 or empty netmask are skipped.
 */
void TryBroadcastAllNetworks(ZL_ushort port, ZL_cstring message, ZL_ulong max_ping) {
    PIP_ADAPTER_INFO pAdapterInfo;
    PIP_ADAPTER_INFO pAdapter;
    IP_ADDR_STRING* pAddrStruct;
    ZL_ulong ulOutBufLen = 0;
    /* First call sizes the buffer; second call fills it. */
    GetAdaptersInfo(NULL, &ulOutBufLen);
    pAdapterInfo = (IP_ADAPTER_INFO*) malloc(ulOutBufLen);
    if (GetAdaptersInfo(pAdapterInfo, &ulOutBufLen) == NO_ERROR) {
        pAdapter = pAdapterInfo;
        while (pAdapter) {
            pAddrStruct = &(pAdapter->IpAddressList);
            while (pAddrStruct) {
                ZL_ulong binaryIP = inet_addr(pAddrStruct->IpAddress.String);
                ZL_ulong netmask = inet_addr(pAddrStruct->IpMask.String);
                // to test with smaller network - 0xc0ffffff
                // Windows specific - long = int
                // error == int(-1) == uint(uint_max)
                if (
                    binaryIP != ULONG_MAX && binaryIP != 0
                    && netmask != ULONG_MAX // if mask is 255.255.255.255, we cannot broadcast anyway
                    && netmask != 0) {
                    ZL_ulong subnet = binaryIP & netmask;
                    /* host count = inverted mask, swapped into host order */
                    ZL_ulong subnet_length = SWAP_UINT32(ULONG_MAX & (~netmask));
                    TryBroadcastNetwork(
                        subnet, subnet_length, // TODO: is x.x.x.0 valid IP for a device? if not, then: subnet + 1, subnet_length - 1
                        port, message, max_ping
                    );
                }
                pAddrStruct = pAddrStruct->Next;
            }
            pAdapter = pAdapter->Next;
        }
    } else {
        LDEBUG("Error: TryBroadcastAllNetworks failed completely");
    }
    SFREE(pAdapterInfo);
}
struct_vector.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008,  Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE.  See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision$
 ***********************************************************************EHEADER*/

/******************************************************************************
 *
 * Member functions for hypre_StructVector class.
 *
 *****************************************************************************/

#include "_hypre_struct_mv.h"

/*--------------------------------------------------------------------------
 * Create a new vector living on `grid`.  Takes a reference on the grid,
 * copies the grid's ghost-layer defaults, and starts with refcount 1.
 *--------------------------------------------------------------------------*/

hypre_StructVector *
hypre_StructVectorCreate( MPI_Comm          comm,
                          hypre_StructGrid *grid )
{
   HYPRE_Int           ndim = hypre_StructGridNDim(grid);
   hypre_StructVector *vector;
   HYPRE_Int           i;

   vector = hypre_CTAlloc(hypre_StructVector, 1);

   hypre_StructVectorComm(vector)           = comm;
   hypre_StructGridRef(grid, &hypre_StructVectorGrid(vector));
   hypre_StructVectorDataAlloced(vector)    = 1;
   hypre_StructVectorBGhostNotClear(vector) = 0;
   hypre_StructVectorRefCount(vector)       = 1;

   /* set defaults */
   for (i = 0; i < 2*ndim; i++)
   {
      hypre_StructVectorNumGhost(vector)[i] = hypre_StructGridNumGhost(grid)[i];
   }

   return vector;
}

/*--------------------------------------------------------------------------
 * Take an additional reference on `vector` (paired with Destroy).
 *--------------------------------------------------------------------------*/

hypre_StructVector *
hypre_StructVectorRef( hypre_StructVector *vector )
{
   hypre_StructVectorRefCount(vector) ++;

   return vector;
}

/*--------------------------------------------------------------------------
 * Drop one reference; free all storage when the count reaches zero.
 * The data array is only freed if this vector allocated it itself.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructVectorDestroy( hypre_StructVector *vector )
{
   if (vector)
   {
      hypre_StructVectorRefCount(vector) --;
      if (hypre_StructVectorRefCount(vector) == 0)
      {
         if (hypre_StructVectorDataAlloced(vector))
         {
            hypre_SharedTFree(hypre_StructVectorData(vector));
         }
         hypre_TFree(hypre_StructVectorDataIndices(vector));
         hypre_BoxArrayDestroy(hypre_StructVectorDataSpace(vector));
         hypre_StructGridDestroy(hypre_StructVectorGrid(vector));
         hypre_TFree(vector);
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Set up everything except the data array itself: the ghost-expanded
 * data_space boxes, the per-box offsets into the (future) data array,
 * and the total data_size.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructVectorInitializeShell( hypre_StructVector *vector )
{
   HYPRE_Int             ndim = hypre_StructVectorNDim(vector);
   hypre_StructGrid     *grid;

   HYPRE_Int            *num_ghost;

   hypre_BoxArray       *data_space;
   hypre_BoxArray       *boxes;
   hypre_Box            *box;
   hypre_Box            *data_box;

   HYPRE_Int            *data_indices;
   HYPRE_Int             data_size;

   HYPRE_Int             i, d;

   /*-----------------------------------------------------------------------
    * Set up data_space: each grid box grown by the ghost layer
    *-----------------------------------------------------------------------*/

   grid = hypre_StructVectorGrid(vector);

   if (hypre_StructVectorDataSpace(vector) == NULL)
   {
      num_ghost = hypre_StructVectorNumGhost(vector);

      boxes = hypre_StructGridBoxes(grid);
      data_space = hypre_BoxArrayCreate(hypre_BoxArraySize(boxes), ndim);

      hypre_ForBoxI(i, boxes)
      {
         box = hypre_BoxArrayBox(boxes, i);
         data_box = hypre_BoxArrayBox(data_space, i);

         hypre_CopyBox(box, data_box);
         for (d = 0; d < ndim; d++)
         {
            /* num_ghost is laid out [lo_0, hi_0, lo_1, hi_1, ...] */
            hypre_BoxIMinD(data_box, d) -= num_ghost[2*d];
            hypre_BoxIMaxD(data_box, d) += num_ghost[2*d + 1];
         }
      }

      hypre_StructVectorDataSpace(vector) = data_space;
   }

   /*-----------------------------------------------------------------------
    * Set up data_indices array and data_size (prefix sums of box volumes)
    *-----------------------------------------------------------------------*/

   if (hypre_StructVectorDataIndices(vector) == NULL)
   {
      data_space = hypre_StructVectorDataSpace(vector);
      data_indices = hypre_CTAlloc(HYPRE_Int, hypre_BoxArraySize(data_space));

      data_size = 0;
      hypre_ForBoxI(i, data_space)
      {
         data_box = hypre_BoxArrayBox(data_space, i);

         data_indices[i] = data_size;
         data_size += hypre_BoxVolume(data_box);
      }

      hypre_StructVectorDataIndices(vector) = data_indices;
      hypre_StructVectorDataSize(vector)    = data_size;
   }

   /*-----------------------------------------------------------------------
    * Set total number of nonzero coefficients
    *-----------------------------------------------------------------------*/

   hypre_StructVectorGlobalSize(vector) = hypre_StructGridGlobalSize(grid);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Attach caller-owned data; the vector will NOT free it on Destroy.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructVectorInitializeData( hypre_StructVector *vector,
                                  HYPRE_Complex      *data   )
{
   hypre_StructVectorData(vector) = data;
   hypre_StructVectorDataAlloced(vector) = 0;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Full initialization: build the shell, then allocate (zeroed) data that
 * the vector owns.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructVectorInitialize( hypre_StructVector *vector )
{
   HYPRE_Complex *data;

   hypre_StructVectorInitializeShell(vector);

   data = hypre_SharedCTAlloc(HYPRE_Complex, hypre_StructVectorDataSize(vector));
   hypre_StructVectorInitializeData(vector, data);
   hypre_StructVectorDataAlloced(vector) = 1;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * (action > 0): add-to values
 * (action = 0): set values
 * (action < 0): get values
 *
 * (outside > 0): set values possibly outside of the grid extents
 * (outside = 0): set values only inside the grid extents
 *
 * NOTE: Getting and setting values outside of the grid extents requires care,
 * as these values may be stored in multiple ghost zone locations.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructVectorSetValues( hypre_StructVector *vector,
                             hypre_Index         grid_index,
                             HYPRE_Complex      *values,
                             HYPRE_Int           action,
                             HYPRE_Int           boxnum,
                             HYPRE_Int           outside    )
{
   hypre_BoxArray      *grid_boxes;
   hypre_Box           *grid_box;

   HYPRE_Complex       *vecp;

   HYPRE_Int            i, istart, istop;

   /* outside > 0: search the ghost-expanded data space, else grid boxes */
   if (outside > 0)
   {
      grid_boxes = hypre_StructVectorDataSpace(vector);
   }
   else
   {
      grid_boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(vector));
   }

   /* boxnum < 0 means "all boxes"; otherwise just that one box */
   if (boxnum < 0)
   {
      istart = 0;
      istop  = hypre_BoxArraySize(grid_boxes);
   }
   else
   {
      istart = boxnum;
      istop  = istart + 1;
   }

   for (i = istart; i < istop; i++)
   {
      grid_box = hypre_BoxArrayBox(grid_boxes, i);

      if (hypre_IndexInBox(grid_index, grid_box))
      {
         vecp = hypre_StructVectorBoxDataValue(vector, i, grid_index);

         if (action > 0)
         {
            *vecp += *values;
         }
         else if (action > -1)  /* i.e. action == 0: set */
         {
            *vecp = *values;
         }
         else /* action < 0 */
         {
            *values = *vecp;
         }
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * (action > 0): add-to values
 * (action = 0): set values
 * (action < 0): get values
 *
 * (outside > 0): set values possibly outside of the grid extents
 * (outside = 0): set values only inside the grid extents
 *
 * NOTE: Getting and setting values outside of the grid extents requires care,
 * as these values may be stored in multiple ghost zone locations.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructVectorSetBoxValues( hypre_StructVector *vector,
                                hypre_Box          *set_box,
                                hypre_Box          *value_box,
                                HYPRE_Complex      *values,
                                HYPRE_Int           action,
                                HYPRE_Int           boxnum,
                                HYPRE_Int           outside )
{
   hypre_BoxArray      *grid_boxes;
   hypre_Box           *grid_box;
   hypre_Box           *int_box;

   hypre_BoxArray      *data_space;
   hypre_Box           *data_box;
   hypre_IndexRef       data_start;
   hypre_Index          data_stride;
   HYPRE_Int            datai;
   HYPRE_Complex       *datap;

   hypre_Box           *dval_box;
   hypre_Index          dval_start;
   hypre_Index          dval_stride;
   HYPRE_Int            dvali;

   hypre_Index          loop_size;

   HYPRE_Int            i, istart, istop;

   /*-----------------------------------------------------------------------
    * Initialize some things
    *-----------------------------------------------------------------------*/

   if (outside > 0)
   {
      grid_boxes = hypre_StructVectorDataSpace(vector);
   }
   else
   {
      grid_boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(vector));
   }
   data_space = hypre_StructVectorDataSpace(vector);

   if (boxnum < 0)
   {
      istart = 0;
      istop  = hypre_BoxArraySize(grid_boxes);
   }
   else
   {
      istart = boxnum;
      istop  = istart + 1;
   }

   /*-----------------------------------------------------------------------
    * Set the vector coefficients: for each target box, intersect with
    * set_box and run a two-box loop over (data array, values array)
    *-----------------------------------------------------------------------*/

   hypre_SetIndex(data_stride, 1);

   int_box = hypre_BoxCreate(hypre_StructVectorNDim(vector));
   dval_box = hypre_BoxDuplicate(value_box);
   hypre_SetIndex(dval_stride, 1);

   for (i = istart; i < istop; i++)
   {
      grid_box = hypre_BoxArrayBox(grid_boxes, i);
      data_box = hypre_BoxArrayBox(data_space, i);

      hypre_IntersectBoxes(set_box, grid_box, int_box);

      /* if there was an intersection */
      if (hypre_BoxVolume(int_box))
      {
         data_start = hypre_BoxIMin(int_box);
         hypre_CopyIndex(data_start, dval_start);

         datap = hypre_StructVectorBoxData(vector, i);

         hypre_BoxGetSize(int_box, loop_size);

         if (action > 0)
         {
            hypre_BoxLoop2Begin(hypre_StructVectorNDim(vector), loop_size,
                                data_box,data_start,data_stride,datai,
                                dval_box,dval_start,dval_stride,dvali);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,datai,dvali) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop2For(datai, dvali)
            {
               datap[datai] += values[dvali];
            }
            hypre_BoxLoop2End(datai, dvali);
         }
         else if (action > -1)  /* action == 0: set */
         {
            hypre_BoxLoop2Begin(hypre_StructVectorNDim(vector), loop_size,
                                data_box,data_start,data_stride,datai,
                                dval_box,dval_start,dval_stride,dvali);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,datai,dvali) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop2For(datai, dvali)
            {
               datap[datai] = values[dvali];
            }
            hypre_BoxLoop2End(datai, dvali);
         }
         else /* action < 0: get */
         {
            hypre_BoxLoop2Begin(hypre_StructVectorNDim(vector), loop_size,
                                data_box,data_start,data_stride,datai,
                                dval_box,dval_start,dval_stride,dvali);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,datai,dvali) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop2For(datai, dvali)
            {
               values[dvali] = datap[datai];
            }
            hypre_BoxLoop2End(datai, dvali);
         }
      }
   }

   hypre_BoxDestroy(int_box);
   hypre_BoxDestroy(dval_box);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Zero a single value (by index).
 * (outside > 0): clear values possibly outside of the grid extents
 * (outside = 0): clear values only inside the grid extents
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructVectorClearValues( hypre_StructVector *vector,
                               hypre_Index         grid_index,
                               HYPRE_Int           boxnum,
                               HYPRE_Int           outside    )
{
   hypre_BoxArray      *grid_boxes;
   hypre_Box           *grid_box;

   HYPRE_Complex       *vecp;

   HYPRE_Int            i, istart, istop;

   if (outside > 0)
   {
      grid_boxes = hypre_StructVectorDataSpace(vector);
   }
   else
   {
      grid_boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(vector));
   }

   if (boxnum < 0)
   {
      istart = 0;
      istop  = hypre_BoxArraySize(grid_boxes);
   }
   else
   {
      istart = boxnum;
      istop  = istart + 1;
   }

   for (i = istart; i < istop; i++)
   {
      grid_box = hypre_BoxArrayBox(grid_boxes, i);

      if (hypre_IndexInBox(grid_index, grid_box))
      {
         vecp = hypre_StructVectorBoxDataValue(vector, i, grid_index);
         *vecp = 0.0;
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Zero all values in a box.
 * (outside > 0): clear values possibly outside of the grid extents
 * (outside = 0): clear values only inside the grid extents
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructVectorClearBoxValues( hypre_StructVector *vector,
                                  hypre_Box          *clear_box,
                                  HYPRE_Int           boxnum,
                                  HYPRE_Int           outside )
{
   hypre_BoxArray      *grid_boxes;
   hypre_Box           *grid_box;
   hypre_Box           *int_box;

   hypre_BoxArray      *data_space;
   hypre_Box           *data_box;
   hypre_IndexRef       data_start;
   hypre_Index          data_stride;
   HYPRE_Int            datai;
   HYPRE_Complex       *datap;

   hypre_Index          loop_size;

   HYPRE_Int            i, istart, istop;

   /*-----------------------------------------------------------------------
    * Initialize some things
    *-----------------------------------------------------------------------*/

   if (outside > 0)
   {
      grid_boxes = hypre_StructVectorDataSpace(vector);
   }
   else
   {
      grid_boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(vector));
   }
   data_space = hypre_StructVectorDataSpace(vector);

   if (boxnum < 0)
   {
      istart = 0;
      istop  = hypre_BoxArraySize(grid_boxes);
   }
   else
   {
      istart = boxnum;
      istop  = istart + 1;
   }

   /*-----------------------------------------------------------------------
    * Set the vector coefficients
    *-----------------------------------------------------------------------*/

   hypre_SetIndex(data_stride, 1);

   int_box = hypre_BoxCreate(hypre_StructVectorNDim(vector));

   for (i = istart; i < istop; i++)
   {
      grid_box = hypre_BoxArrayBox(grid_boxes, i);
      data_box = hypre_BoxArrayBox(data_space, i);

      hypre_IntersectBoxes(clear_box, grid_box, int_box);

      /* if there was an intersection */
      if (hypre_BoxVolume(int_box))
      {
         data_start = hypre_BoxIMin(int_box);

         datap = hypre_StructVectorBoxData(vector, i);

         hypre_BoxGetSize(int_box, loop_size);

         hypre_BoxLoop1Begin(hypre_StructVectorNDim(vector), loop_size,
                             data_box,data_start,data_stride,datai);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,datai) HYPRE_SMP_SCHEDULE
#endif
         hypre_BoxLoop1For(datai)
         {
            datap[datai] = 0.0;
         }
         hypre_BoxLoop1End(datai);
      }
   }

   hypre_BoxDestroy(int_box);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Zero the entire data array, ghost zones included (flat loop, no boxes).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructVectorClearAllValues( hypre_StructVector *vector )
{
   HYPRE_Complex *data      = hypre_StructVectorData(vector);
   HYPRE_Int      data_size = hypre_StructVectorDataSize(vector);
   HYPRE_Int      i;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < data_size; i++)
   {
      data[i] = 0.0;
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Copy the ghost-layer sizes in; must be called before Initialize to
 * have any effect on the data space.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructVectorSetNumGhost( hypre_StructVector *vector,
                               HYPRE_Int          *num_ghost )
{
   HYPRE_Int  d, ndim = hypre_StructVectorNDim(vector);

   for (d = 0; d < ndim; d++)
   {
      hypre_StructVectorNumGhost(vector)[2*d]     = num_ghost[2*d];
      hypre_StructVectorNumGhost(vector)[2*d + 1] = num_ghost[2*d + 1];
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * No-op: struct vectors need no assembly step (kept for API symmetry).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructVectorAssemble( hypre_StructVector *vector )
{
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * copies data from x to y
 * y has its own data array, so this is a deep copy in that sense.
 * The grid and other size information are not copied - they are
 * assumed to have already been set up to be consistent.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructVectorCopy( hypre_StructVector *x,
                        hypre_StructVector *y )
{
   hypre_Box          *x_data_box;

   HYPRE_Int           vi;
   HYPRE_Complex      *xp, *yp;

   hypre_BoxArray     *boxes;
   hypre_Box          *box;
   hypre_Index         loop_size;
   hypre_IndexRef      start;
   hypre_Index         unit_stride;

   HYPRE_Int           i;

   /*-----------------------------------------------------------------------
    * Set the vector coefficients
    *-----------------------------------------------------------------------*/

   hypre_SetIndex(unit_stride, 1);

   boxes = hypre_StructGridBoxes( hypre_StructVectorGrid(x) );
   hypre_ForBoxI(i, boxes)
   {
      box   = hypre_BoxArrayBox(boxes, i);
      start = hypre_BoxIMin(box);

      x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);
      xp = hypre_StructVectorBoxData(x, i);
      yp = hypre_StructVectorBoxData(y, i);

      hypre_BoxGetSize(box, loop_size);

      hypre_BoxLoop1Begin(hypre_StructVectorNDim(x), loop_size,
                          x_data_box, start, unit_stride, vi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,vi ) HYPRE_SMP_SCHEDULE
#endif
      hypre_BoxLoop1For(vi)
      {
         yp[vi] = xp[vi];
      }
      hypre_BoxLoop1End(vi);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Fill every grid value (not ghost zones) with a single constant.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructVectorSetConstantValues( hypre_StructVector *vector,
                                     HYPRE_Complex       values )
{
   hypre_Box          *v_data_box;

   HYPRE_Int           vi;
   HYPRE_Complex      *vp;

   hypre_BoxArray     *boxes;
   hypre_Box          *box;
   hypre_Index         loop_size;
   hypre_IndexRef      start;
   hypre_Index         unit_stride;

   HYPRE_Int           i;

   /*-----------------------------------------------------------------------
    * Set the vector coefficients
    *-----------------------------------------------------------------------*/

   hypre_SetIndex(unit_stride, 1);

   boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(vector));
   hypre_ForBoxI(i, boxes)
   {
      box   = hypre_BoxArrayBox(boxes, i);
      start = hypre_BoxIMin(box);

      v_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(vector), i);
      vp = hypre_StructVectorBoxData(vector, i);

      hypre_BoxGetSize(box, loop_size);

      hypre_BoxLoop1Begin(hypre_StructVectorNDim(vector), loop_size,
                          v_data_box, start, unit_stride, vi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,vi ) HYPRE_SMP_SCHEDULE
#endif
      hypre_BoxLoop1For(vi)
      {
         vp[vi] = values;
      }
      hypre_BoxLoop1End(vi);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Takes a function pointer of the form:  HYPRE_Complex  f(i,j,k)
 * RDF: This function doesn't appear to be used anywhere.
 *--------------------------------------------------------------------------*/

/* ONLY3D */

HYPRE_Int
hypre_StructVectorSetFunctionValues( hypre_StructVector *vector,
                                     HYPRE_Complex     (*fcn)(HYPRE_Int, HYPRE_Int, HYPRE_Int) )
{
   hypre_Box          *v_data_box;

   HYPRE_Int           vi;
   HYPRE_Complex      *vp;

   hypre_BoxArray     *boxes;
   hypre_Box          *box;
   hypre_Index         loop_size;
   hypre_IndexRef      start;
   hypre_Index         unit_stride;

   HYPRE_Int           b, i, j, k;

   /*-----------------------------------------------------------------------
    * Set the vector coefficients
    *-----------------------------------------------------------------------*/

   hypre_SetIndex(unit_stride, 1);

   boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(vector));
   hypre_ForBoxI(b, boxes)
   {
      box   = hypre_BoxArrayBox(boxes, b);
      start = hypre_BoxIMin(box);

      v_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(vector), b);
      vp = hypre_StructVectorBoxData(vector, b);

      hypre_BoxGetSize(box, loop_size);

      hypre_BoxLoop1Begin(hypre_StructVectorNDim(vector), loop_size,
                          v_data_box, start, unit_stride, vi);
      /* NOTE(review): (i,j,k) are advanced by the serial loop below; this
         only matches the grid index along one sweep direction -- see the
         threading caveat that follows. */
      i = hypre_IndexD(start, 0);
      j = hypre_IndexD(start, 1);
      k = hypre_IndexD(start, 2);
      /* RDF: This won't work as written with threading on */
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,vi ) HYPRE_SMP_SCHEDULE
#endif
#else
      hypre_BoxLoopSetOneBlock();
#endif
      hypre_BoxLoop1For(vi)
      {
         vp[vi] = fcn(i, j, k);
         i++;
         j++;
         k++;
      }
      hypre_BoxLoop1End(vi);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Zero every ghost-zone value (data space minus grid boxes).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructVectorClearGhostValues( hypre_StructVector *vector )
{
   HYPRE_Int           ndim = hypre_StructVectorNDim(vector);
   hypre_Box          *v_data_box;

   HYPRE_Int           vi;
   HYPRE_Complex      *vp;

   hypre_BoxArray     *boxes;
   hypre_Box          *box;
   hypre_BoxArray     *diff_boxes;
   hypre_Box          *diff_box;
   hypre_Index         loop_size;
   hypre_IndexRef      start;
   hypre_Index         unit_stride;

   HYPRE_Int           i, j;

   /*-----------------------------------------------------------------------
    * Set the vector coefficients
    *-----------------------------------------------------------------------*/

   hypre_SetIndex(unit_stride, 1);

   boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(vector));
   diff_boxes = hypre_BoxArrayCreate(0, ndim);
   hypre_ForBoxI(i, boxes)
   {
      box        = hypre_BoxArrayBox(boxes, i);
      v_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(vector), i);

      /* diff_boxes = data box minus interior box = ghost regions */
      hypre_BoxArraySetSize(diff_boxes, 0);
      hypre_SubtractBoxes(v_data_box, box, diff_boxes);

      vp = hypre_StructVectorBoxData(vector, i);
      hypre_ForBoxI(j, diff_boxes)
      {
         diff_box = hypre_BoxArrayBox(diff_boxes, j);
         start = hypre_BoxIMin(diff_box);

         hypre_BoxGetSize(diff_box, loop_size);

         hypre_BoxLoop1Begin(hypre_StructVectorNDim(vector), loop_size,
                             v_data_box, start, unit_stride, vi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,vi ) HYPRE_SMP_SCHEDULE
#endif
         hypre_BoxLoop1For(vi)
         {
            vp[vi] = 0.0;
         }
         hypre_BoxLoop1End(vi);
      }
   }
   hypre_BoxArrayDestroy(diff_boxes);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * clears vector values on the physical boundaries
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructVectorClearBoundGhostValues( hypre_StructVector *vector,
                                         HYPRE_Int           force )
{
   HYPRE_Int           ndim = hypre_StructVectorNDim(vector);
   HYPRE_Int           vi;
   HYPRE_Complex      *vp;
   hypre_BoxArray     *boxes;
   hypre_Box          *box;
   hypre_Box          *v_data_box;
   hypre_Index         loop_size;
   hypre_IndexRef      start;
   hypre_Index         stride;
   hypre_Box          *bbox;
   hypre_StructGrid   *grid;
   hypre_BoxArray     *boundary_boxes;
   hypre_BoxArray     *array_of_box;
   hypre_BoxArray     *work_boxarray;

   HYPRE_Int           i, i2;

   /*-----------------------------------------------------------------------
    * Set the vector coefficients
    *-----------------------------------------------------------------------*/

   /* Only clear if not clear already or if force argument is set */
   if (hypre_StructVectorBGhostNotClear(vector) || force)
   {
      grid = hypre_StructVectorGrid(vector);
      boxes = hypre_StructGridBoxes(grid);
      hypre_SetIndex(stride, 1);

      hypre_ForBoxI(i, boxes)
      {
         box = hypre_BoxArrayBox(boxes, i);
         boundary_boxes = hypre_BoxArrayCreate( 0, ndim );
         v_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(vector), i);
         hypre_BoxBoundaryG( v_data_box, grid, boundary_boxes );
         vp = hypre_StructVectorBoxData(vector, i);

         /* box is a grid box, no ghost zones.
            v_data_box is vector data box, may or may not have ghost zones
            To get only ghost zones, subtract box from boundary_boxes.   */
         work_boxarray = hypre_BoxArrayCreate( 0, ndim );
         array_of_box = hypre_BoxArrayCreate( 1, ndim );
         hypre_BoxArrayBoxes(array_of_box)[0] = *box;
         hypre_SubtractBoxArrays( boundary_boxes, array_of_box, work_boxarray );

         hypre_ForBoxI(i2, boundary_boxes)
         {
            bbox = hypre_BoxArrayBox(boundary_boxes, i2);
            hypre_BoxGetSize(bbox, loop_size);
            start = hypre_BoxIMin(bbox);
            hypre_BoxLoop1Begin(hypre_StructVectorNDim(vector), loop_size,
                                v_data_box, start, stride, vi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,vi ) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop1For(vi)
            {
               vp[vi] = 0.0;
            }
            hypre_BoxLoop1End(vi);
         }
         hypre_BoxArrayDestroy(boundary_boxes);
         hypre_BoxArrayDestroy(work_boxarray);
         hypre_BoxArrayDestroy(array_of_box);
      }

      hypre_StructVectorBGhostNotClear(vector) = 0;
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Scale the entire data array (ghost zones included) by `factor`,
 * using a synthetic 1-D box over the flat data array.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructVectorScaleValues( hypre_StructVector *vector, HYPRE_Complex factor )
{
   HYPRE_Int         datai;
   HYPRE_Complex    *data;

   hypre_Index       imin;
   hypre_Index       imax;
   hypre_Box        *box;
   hypre_Index       loop_size;

   /*-----------------------------------------------------------------------
    * Set the vector coefficients
    *-----------------------------------------------------------------------*/

   box = hypre_BoxCreate(hypre_StructVectorNDim(vector));
   hypre_SetIndex(imin, 1);
   hypre_SetIndex(imax, 1);
   hypre_IndexD(imax, 0) = hypre_StructVectorDataSize(vector);
   hypre_BoxSetExtents(box, imin, imax);
   data = hypre_StructVectorData(vector);
   hypre_BoxGetSize(box, loop_size);

   hypre_BoxLoop1Begin(hypre_StructVectorNDim(vector), loop_size,
                       box, imin, imin, datai);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,datai) HYPRE_SMP_SCHEDULE
#endif
   hypre_BoxLoop1For(datai)
   {
      data[datai] *= factor;
   }
   hypre_BoxLoop1End(datai);

   hypre_BoxDestroy(box);

   return hypre_error_flag;
}
/*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ hypre_CommPkg * hypre_StructVectorGetMigrateCommPkg( hypre_StructVector *from_vector, hypre_StructVector *to_vector ) { hypre_CommInfo *comm_info; hypre_CommPkg *comm_pkg; /*------------------------------------------------------ * Set up hypre_CommPkg *------------------------------------------------------*/ hypre_CreateCommInfoFromGrids(hypre_StructVectorGrid(from_vector), hypre_StructVectorGrid(to_vector), &comm_info); hypre_CommPkgCreate(comm_info, hypre_StructVectorDataSpace(from_vector), hypre_StructVectorDataSpace(to_vector), 1, NULL, 0, hypre_StructVectorComm(from_vector), &comm_pkg); hypre_CommInfoDestroy(comm_info); /* is this correct for periodic? */ return comm_pkg; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_StructVectorMigrate( hypre_CommPkg *comm_pkg, hypre_StructVector *from_vector, hypre_StructVector *to_vector ) { hypre_CommHandle *comm_handle; /*----------------------------------------------------------------------- * Migrate the vector data *-----------------------------------------------------------------------*/ hypre_InitializeCommunication(comm_pkg, hypre_StructVectorData(from_vector), hypre_StructVectorData(to_vector), 0, 0, &comm_handle); hypre_FinalizeCommunication(comm_handle); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_StructVectorPrint *--------------------------------------------------------------------------*/ HYPRE_Int hypre_StructVectorPrint( const char *filename, hypre_StructVector *vector, HYPRE_Int all ) { FILE *file; char new_filename[255]; hypre_StructGrid *grid; hypre_BoxArray *boxes; hypre_BoxArray *data_space; HYPRE_Int myid; /*---------------------------------------- * Open 
    file
    *----------------------------------------*/

   /* One output file per MPI rank: "<filename>.<rank padded to 5 digits>". */
   hypre_MPI_Comm_rank(hypre_StructVectorComm(vector), &myid );
   hypre_sprintf(new_filename, "%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "w")) == NULL)
   {
      hypre_printf("Error: can't open output file %s\n", new_filename);
      exit(1);
   }

   /*----------------------------------------
    * Print header info
    *----------------------------------------*/

   hypre_fprintf(file, "StructVector\n");

   /* print grid info */
   hypre_fprintf(file, "\nGrid:\n");
   grid = hypre_StructVectorGrid(vector);
   hypre_StructGridPrint(file, grid);

   /*----------------------------------------
    * Print data
    *----------------------------------------*/

   data_space = hypre_StructVectorDataSpace(vector);

   /* "all" selects whether ghost values are printed too. */
   if (all)
      boxes = data_space;
   else
      boxes = hypre_StructGridBoxes(grid);

   hypre_fprintf(file, "\nData:\n");
   hypre_PrintBoxArrayData(file, boxes, data_space, 1,
                           hypre_StructGridNDim(grid),
                           hypre_StructVectorData(vector));

   /*----------------------------------------
    * Close file
    *----------------------------------------*/

   fflush(file);
   fclose(file);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 *--------------------------------------------------------------------------*/

/* Read this process's piece of a vector from "<filename>.<myid>" (as
 * written by hypre_StructVectorPrint), create and assemble a new
 * StructVector with the given ghost layout, and return it.  Caller owns
 * the returned vector. */
hypre_StructVector *
hypre_StructVectorRead( MPI_Comm    comm,
                        const char *filename,
                        HYPRE_Int  *num_ghost )
{
   FILE                 *file;
   char                  new_filename[255];

   hypre_StructVector   *vector;

   hypre_StructGrid     *grid;
   hypre_BoxArray       *boxes;

   hypre_BoxArray       *data_space;

   HYPRE_Int             myid;

   /*----------------------------------------
    * Open file
    *----------------------------------------*/

   hypre_MPI_Comm_rank(comm, &myid );
   hypre_sprintf(new_filename, "%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "r")) == NULL)
   {
      hypre_printf("Error: can't open output file %s\n", new_filename);
      exit(1);
   }

   /*----------------------------------------
    * Read header info
    *----------------------------------------*/

   hypre_fscanf(file, "StructVector\n");

   /* read grid info */
   hypre_fscanf(file, "\nGrid:\n");
   hypre_StructGridRead(comm,file,&grid);

   /*----------------------------------------
    * Initialize the vector
    *----------------------------------------*/

   vector = hypre_StructVectorCreate(comm, grid);
   hypre_StructVectorSetNumGhost(vector, num_ghost);
   hypre_StructVectorInitialize(vector);

   /*----------------------------------------
    * Read data
    *----------------------------------------*/

   boxes = hypre_StructGridBoxes(grid);
   data_space = hypre_StructVectorDataSpace(vector);
   hypre_fscanf(file, "\nData:\n");
   hypre_ReadBoxArrayData(file, boxes, data_space, 1,
                          hypre_StructGridNDim(grid),
                          hypre_StructVectorData(vector));

   /*----------------------------------------
    * Assemble the vector
    *----------------------------------------*/

   hypre_StructVectorAssemble(vector);

   /*----------------------------------------
    * Close file
    *----------------------------------------*/

   fclose(file);

   return vector;
}

/*--------------------------------------------------------------------------
 * The following is used only as a debugging aid.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructVectorMaxValue( hypre_StructVector *vector,
                            HYPRE_Real *max_value, HYPRE_Int *max_index,
                            hypre_Index max_xyz_index )
/* Input: vector, and pointers to where to put returned data.
   Return value: error flag, 0 means ok.
   Finds the maximum value in a vector, puts it in max_value.
   The corresponding index is put in max_index.
   A hypre_Index corresponding to max_index is put in max_xyz_index.
   We assume that there is only one box to deal with.
*/
{
   HYPRE_Int         datai;
   HYPRE_Real       *data;

   hypre_Index       imin;
   hypre_BoxArray   *boxes;
   hypre_Box        *box;
   hypre_Index       loop_size;
   hypre_Index       unit_stride;

   HYPRE_Int         i, ndim;
   HYPRE_Real        maxvalue;
   HYPRE_Int         maxindex;

   ndim = hypre_StructVectorNDim(vector);
   boxes = hypre_StructVectorDataSpace(vector);
   if ( hypre_BoxArraySize(boxes)!=1 )
   {
      /* if more than one box, the return system max_xyz_index is too simple
         if needed, fix later */
      hypre_error(HYPRE_ERROR_GENERIC);
      return hypre_error_flag;
   }
   hypre_SetIndex(unit_stride, 1);
   hypre_ForBoxI(i, boxes)
   {
      box  = hypre_BoxArrayBox(boxes, i);
      /*v_data_box =
        hypre_BoxArrayBox(hypre_StructVectorDataSpace(vector), i);*/
      data = hypre_StructVectorBoxData(vector, i);
      hypre_BoxGetSize(box, loop_size);
      hypre_CopyIndex( hypre_BoxIMin(box), imin );
      hypre_BoxLoop1Begin(ndim, loop_size, box, imin, unit_stride, datai);
      /* Seed the search with the first entry of the box. */
      maxindex = hypre_BoxIndexRank( box, imin );
      maxvalue = data[maxindex];
      hypre_SetIndex(max_xyz_index, 0);
      /* NOTE(review): when OpenMP is enabled this max-search updates
         maxvalue/maxindex from multiple threads without a reduction or
         critical section -- looks racy; confirm against upstream intent
         (function is documented above as a debugging aid only). */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,datai) HYPRE_SMP_SCHEDULE
#endif
      hypre_BoxLoop1For(datai)
      {
         if ( data[datai] > maxvalue )
         {
            maxvalue = data[datai];
            maxindex = datai;
            hypre_BoxLoopGetIndex(max_xyz_index);
         }
      }
      hypre_BoxLoop1End(datai);
      /* Convert the loop-relative index into a grid index. */
      hypre_AddIndexes(max_xyz_index, imin, ndim, max_xyz_index);
   }

   *max_value = maxvalue;
   *max_index = maxindex;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_StructVectorClone
 * Returns a complete copy of x - a deep copy, with its own copy of the data.
 *--------------------------------------------------------------------------*/

/* Deep-copy constructor: allocates a new StructVector on the same
 * communicator and grid as x, duplicates its data space, data indices,
 * data values, ghost layout, and bookkeeping flags.  Caller owns (and
 * must destroy) the returned vector. */
hypre_StructVector *
hypre_StructVectorClone( hypre_StructVector *x)
{
   MPI_Comm             comm = hypre_StructVectorComm(x);
   hypre_StructGrid    *grid = hypre_StructVectorGrid(x);
   hypre_BoxArray      *data_space = hypre_StructVectorDataSpace(x);
   HYPRE_Int           *data_indices = hypre_StructVectorDataIndices(x);
   HYPRE_Int            data_size = hypre_StructVectorDataSize(x);
   HYPRE_Int            ndim = hypre_StructGridNDim(grid);
   HYPRE_Int            data_space_size = hypre_BoxArraySize(data_space);
   HYPRE_Int            i;
   hypre_StructVector  *y = hypre_StructVectorCreate(comm, grid);

   /* Duplicate layout metadata and allocate fresh (zeroed) storage. */
   hypre_StructVectorDataSize(y) = data_size;
   hypre_StructVectorDataSpace(y) = hypre_BoxArrayDuplicate(data_space);
   hypre_StructVectorData(y) = hypre_CTAlloc(HYPRE_Complex,data_size);
   hypre_StructVectorDataIndices(y) = hypre_CTAlloc(HYPRE_Int, data_space_size);

   for (i=0; i < data_space_size; i++)
      hypre_StructVectorDataIndices(y)[i] = data_indices[i];

   /* Copy the coefficient values from x into y. */
   hypre_StructVectorCopy( x, y );

   for (i=0; i < 2*ndim; i++)
      hypre_StructVectorNumGhost(y)[i] = hypre_StructVectorNumGhost(x)[i];

   hypre_StructVectorBGhostNotClear(y) = hypre_StructVectorBGhostNotClear(x);
   hypre_StructVectorGlobalSize(y) = hypre_StructVectorGlobalSize(x);

   return y;
}
/* ======================== resize.c ======================== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE % % R R E SS I ZZ E % % RRRR EEE SSS I ZZZ EEE % % R R E SS I ZZ E % % R R EEEEE SSSSS IIIII ZZZZZ EEEEE % % % % % % MagickCore Image Resize Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/

/*
  Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate-private.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/draw.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/magick.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/nt-base-private.h"
#include "magick/pixel.h"
/* NOTE(review): "magick/pixel-private.h" is included a second time below;
   appears redundant -- confirm it is guarded before removing. */
#include "magick/pixel-private.h"
#include "magick/option.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resize.h"
#include "magick/resize-private.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/version.h"
#if defined(MAGICKCORE_LQR_DELEGATE)
#include <lqr.h>
#endif

/*
  Typedef declarations.
*/
/* Fully resolved resize filter: a weighting function and a windowing
   function together with their support sizes and the cubic/blur
   coefficients the functions read at evaluation time. */
struct _ResizeFilter
{
  MagickRealType
    (*filter)(const MagickRealType,const ResizeFilter *),
    (*window)(const MagickRealType,const ResizeFilter *),
    support,        /* filter region of support - the filter support limit */
    window_support, /* window support, usually equal to support (expert only) */
    scale,          /* dimension scaling to fit window support (usually 1.0) */
    blur,           /* x-scale (blur-sharpen) */
    coefficient[7]; /* cubic coefficients for BC-cubic filters */

  ResizeWeightingFunctionType
    filterWeightingType,
    windowWeightingType;

  size_t
    signature;      /* structure validity signature */
};

/*
  Forward declarations.
*/
static MagickRealType
  I0(MagickRealType x),
  BesselOrderOne(MagickRealType),
  Sinc(const MagickRealType, const ResizeFilter *),
  SincFast(const MagickRealType, const ResizeFilter *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   F i l t e r   F u n c t i o n s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  These are the various filter and windowing functions that are provided.
%
%  They are internal to this module only.  See AcquireResizeFilterInfo() for
%  details of the access to these functions, via the GetResizeFilterSupport()
%  and GetResizeFilterWeight() API interface.
%
%  The individual filter functions have this format...
%
%     static MagickRealtype *FilterName(const MagickRealType x,
%        const MagickRealType support)
%
%  A description of each parameter follows:
%
%    o x: the distance from the sampling point generally in the range of 0 to
%      support.  The GetResizeFilterWeight() ensures this a positive value.
%
%    o resize_filter: current filter information.  This allows function to
%      access support, and possibly other pre-calculated information defining
%      the functions.
%
*/

static MagickRealType Blackman(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Blackman: 2nd order cosine windowing function:
      0.42 + 0.5 cos(pi x) + 0.08 cos(2pi x)

    Refactored by Chantal Racette and Nicolas Robidoux to one trig call and
    five flops.  (Uses cos(2 pi x) = 2 cos(pi x)^2 - 1, which folds the
    0.42/0.08 terms into the 0.34/0.16 constants below.)
  */
  const MagickRealType cosine=cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.34+cosine*(0.5+cosine*0.16));
}

static MagickRealType Bohman(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Bohman: 2nd order cosine windowing function:
      (1-x) cos(pi x) + sin(pi x) / pi.

    Refactored by Nicolas Robidoux to one trig call, one sqrt call, and 7
    flops, taking advantage of the fact that the support of Bohman is 1.0 (so
    that we know that sin(pi x) >= 0).
  */
  const double cosine=cos((double) (MagickPI*x));
  /* sin(pi x) recovered from cos(pi x); non-negative on [0,1] support. */
  const double sine=sqrt(1.0-cosine*cosine);
  magick_unreferenced(resize_filter);
  return((MagickRealType) ((1.0-x)*cosine+(1.0/MagickPI)*sine));
}

static MagickRealType Box(const MagickRealType magick_unused(x),
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    A Box filter is a equal weighting function (all weights equal).
    DO NOT LIMIT results by support or resize point sampling will work
    as it requests points beyond its normal 0.0 support size.
  */
  magick_unreferenced(x);
  magick_unreferenced(resize_filter);
  return(1.0);
}

static MagickRealType Cosine(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Cosine window function:
      cos((pi/2)*x).
  */
  magick_unreferenced(resize_filter);
  return((MagickRealType)cos((double) (MagickPI2*x)));
}

static MagickRealType CubicBC(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Cubic Filters using B,C determined values:
       Mitchell-Netravali  B = 1/3 C = 1/3  "Balanced" cubic spline filter
       Catmull-Rom         B = 0   C = 1/2  Interpolatory and exact on linears
       Spline              B = 1   C = 0    B-Spline Gaussian approximation
       Hermite             B = 0   C = 0    B-Spline interpolator

    See paper by Mitchell and Netravali, Reconstruction Filters in Computer
    Graphics Computer Graphics, Volume 22, Number 4, August 1988
    http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/
    Mitchell.pdf.

    Coefficients are determined from B,C values:
       P0 = (  6 - 2*B       )/6 = coeff[0]
       P1 =  0
       P2 = (-18 +12*B + 6*C )/6 = coeff[1]
       P3 = ( 12 - 9*B - 6*C )/6 = coeff[2]
       Q0 = (      8*B +24*C )/6 = coeff[3]
       Q1 = (    -12*B -48*C )/6 = coeff[4]
       Q2 = (      6*B +30*C )/6 = coeff[5]
       Q3 = (    - 1*B - 6*C )/6 = coeff[6]

    which are used to define the filter:

       P0 + P1*x + P2*x^2 + P3*x^3      0 <= x < 1
       Q0 + Q1*x + Q2*x^2 + Q3*x^3      1 <= x < 2

    which ensures function is continuous in value and derivative (slope).
  */
  /* Piecewise cubic: coefficient[0..2] cover [0,1), coefficient[3..6]
     cover [1,2); zero outside the 2.0 support. */
  if (x < 1.0)
    return(resize_filter->coefficient[0]+x*(x*
      (resize_filter->coefficient[1]+x*resize_filter->coefficient[2])));
  if (x < 2.0)
    return(resize_filter->coefficient[3]+x*(resize_filter->coefficient[4]+x*
      (resize_filter->coefficient[5]+x*resize_filter->coefficient[6])));
  return(0.0);
}

static MagickRealType Gaussian(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Gaussian with a sigma = 1/2 (or as user specified)

    Gaussian Formula (1D) ...
        exp( -(x^2)/((2.0*sigma^2) ) / (sqrt(2*PI)*sigma^2))

    Gaussian Formula (2D) ...
        exp( -(x^2+y^2)/(2.0*sigma^2) ) / (PI*sigma^2) )
    or for radius
        exp( -(r^2)/(2.0*sigma^2) ) / (PI*sigma^2) )

    Note that it is only a change from 1-d to radial form is in the
    normalization multiplier which is not needed or used when Gaussian is used
    as a filter.

    The constants are pre-calculated...

        coeff[0]=sigma;
        coeff[1]=1.0/(2.0*sigma^2);
        coeff[2]=1.0/(sqrt(2*PI)*sigma^2);

        exp( -coeff[1]*(x^2)) ) * coeff[2];

    However the multiplier coeff[1] is need, the others are informative only.

    This separates the gaussian 'sigma' value from the 'blur/support'
    settings allowing for its use in special 'small sigma' gaussians,
    without the filter 'missing' pixels because the support becomes too
    small.
  */
  return(exp((double)(-resize_filter->coefficient[1]*x*x)));
}

static MagickRealType Hanning(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Cosine window function:
      0.5+0.5*cos(pi*x).
  */
  const MagickRealType cosine=cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.5+0.5*cosine);
}

static MagickRealType Hamming(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Offset cosine window function:
     .54 + .46 cos(pi x).
  */
  const MagickRealType cosine=cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.54+0.46*cosine);
}

static MagickRealType Jinc(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    See Pratt "Digital Image Processing" p.97 for Jinc/Bessel functions.
    http://mathworld.wolfram.com/JincFunction.html and page 11 of
    http://www.ph.ed.ac.uk/%7ewjh/teaching/mo/slides/lens/lens.pdf

    The original "zoom" program by Paul Heckbert called this "Bessel".  But
    really it is more accurately named "Jinc".
  */
  magick_unreferenced(resize_filter);
  /* Limit value at x=0 is pi/2 (avoids the 0/0 form below). */
  if (x == 0.0)
    return((MagickRealType) (0.5*MagickPI));
  return(BesselOrderOne((MagickRealType) MagickPI*x)/x);
}

static MagickRealType Kaiser(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Kaiser Windowing Function (bessel windowing)

       I0( beta * sqrt( 1-x^2) ) / IO(0)

    Beta (coeff[0]) is a free value from 5 to 8 (defaults to 6.5).
    However it is typically defined in terms of Alpha*PI

    The normalization factor (coeff[1]) is not actually needed,
    but without it the filters has a large value at x=0 making it
    difficult to compare the function with other windowing functions.
  */
  return(resize_filter->coefficient[1]*I0(resize_filter->coefficient[0]*
    sqrt((double) (1.0-x*x))));
}

static MagickRealType Lagrange(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  MagickRealType
    value;

  register ssize_t
    i;

  ssize_t
    n,     /* which piece of the piecewise polynomial x falls in */
    order; /* number of pieces, derived from the window support */

  /*
    Lagrange piecewise polynomial fit of sinc:
      N is the 'order' of the lagrange function and depends on
      the overall support window size of the filter. That is:
      for a support of 2, it gives a lagrange-4 (piecewise cubic function).

      "n" identifies the piece of the piecewise polynomial.

      See Survey: Interpolation Methods, IEEE Transactions on Medical Imaging,
      Vol 18, No 11, November 1999, p1049-1075, -- Equation 27 on p1064.
  */
  if (x > resize_filter->support)
    return(0.0);
  order=(ssize_t) (2.0*resize_filter->window_support);  /* number of pieces */
  n=(ssize_t) (resize_filter->window_support+x);
  value=1.0f;
  /* Standard Lagrange basis product, skipping the i == n node. */
  for (i=0; i < order; i++)
    if (i != n)
      value*=(n-i-x)/(n-i);
  return(value);
}

static MagickRealType Quadratic(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    2nd order (quadratic) B-Spline approximation of Gaussian.
  */
  magick_unreferenced(resize_filter);
  if (x < 0.5)
    return(0.75-x*x);
  if (x < 1.5)
    return(0.5*(x-1.5)*(x-1.5));
  return(0.0);
}

static MagickRealType Sinc(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Scaled sinc(x) function using a trig call:
      sinc(x) == sin(pi x)/(pi x).
  */
  magick_unreferenced(resize_filter);
  if (x != 0.0)
    {
      const MagickRealType alpha=(MagickRealType) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  /* sinc(0) == 1 by the limit definition. */
  return((MagickRealType) 1.0);
}

static MagickRealType SincFast(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Approximations of the sinc function sin(pi x)/(pi x) over the interval
    [-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding
    from the Natural Sciences and Engineering Research Council of Canada.

    Although the approximations are polynomials (for low order of
    approximation) and quotients of polynomials (for higher order of
    approximation) and consequently are similar in form to Taylor polynomials /
    Pade approximants, the approximations are computed with a completely
    different technique.

    Summary: These approximations are "the best" in terms of bang (accuracy)
    for the buck (flops). More specifically: Among the polynomial quotients
    that can be computed using a fixed number of flops (with a given "+ - * /
    budget"), the chosen polynomial quotient is the one closest to the
    approximated function with respect to maximum absolute relative error over
    the given interval.
    The Remez algorithm, as implemented in the boost library's minimax
    package, is the key to the construction:
    http://www.boost.org/doc/libs/1_36_0/libs/
    math/doc/sf_and_dist/html/math_toolkit/backgrounders/remez.html

    If outside of the interval of approximation, use the standard trig
    formula.
  */
  magick_unreferenced(resize_filter);
  if (x > 4.0)
    {
      const MagickRealType alpha=(MagickRealType) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  {
    /*
      The approximations only depend on x^2 (sinc is an even function).
    */
    const MagickRealType xx = x*x;
#if MAGICKCORE_QUANTUM_DEPTH <= 8
    /*
      Maximum absolute relative error 6.3e-6 < 1/2^17.
    */
    const double c0 = 0.173610016489197553621906385078711564924e-2L;
    const double c1 = -0.384186115075660162081071290162149315834e-3L;
    const double c2 = 0.393684603287860108352720146121813443561e-4L;
    const double c3 = -0.248947210682259168029030370205389323899e-5L;
    const double c4 = 0.107791837839662283066379987646635416692e-6L;
    const double c5 = -0.324874073895735800961260474028013982211e-8L;
    const double c6 = 0.628155216606695311524920882748052490116e-10L;
    const double c7 = -0.586110644039348333520104379959307242711e-12L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    /* Factor out the known zeros at x = 1,2,3,4 (xx = 1,4,9,16). */
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#elif MAGICKCORE_QUANTUM_DEPTH <= 16
    /*
      Max. abs. rel. error 2.2e-8 < 1/2^25.
    */
    const double c0 = 0.173611107357320220183368594093166520811e-2L;
    const double c1 = -0.384240921114946632192116762889211361285e-3L;
    const double c2 = 0.394201182359318128221229891724947048771e-4L;
    const double c3 = -0.250963301609117217660068889165550534856e-5L;
    const double c4 = 0.111902032818095784414237782071368805120e-6L;
    const double c5 = -0.372895101408779549368465614321137048875e-8L;
    const double c6 = 0.957694196677572570319816780188718518330e-10L;
    const double c7 = -0.187208577776590710853865174371617338991e-11L;
    const double c8 = 0.253524321426864752676094495396308636823e-13L;
    const double c9 = -0.177084805010701112639035485248501049364e-15L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9))))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#else
    /*
      Max. abs. rel. error 1.2e-12 < 1/2^39.
    */
    const double c0 = 0.173611111110910715186413700076827593074e-2L;
    const double c1 = -0.289105544717893415815859968653611245425e-3L;
    const double c2 = 0.206952161241815727624413291940849294025e-4L;
    const double c3 = -0.834446180169727178193268528095341741698e-6L;
    const double c4 = 0.207010104171026718629622453275917944941e-7L;
    const double c5 = -0.319724784938507108101517564300855542655e-9L;
    const double c6 = 0.288101675249103266147006509214934493930e-11L;
    const double c7 = -0.118218971804934245819960233886876537953e-13L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    const double d0 = 1.0L;
    const double d1 = 0.547981619622284827495856984100563583948e-1L;
    const double d2 = 0.134226268835357312626304688047086921806e-2L;
    const double d3 = 0.178994697503371051002463656833597608689e-4L;
    const double d4 = 0.114633394140438168641246022557689759090e-6L;
    /* Rational form: divide by denominator q for the high-accuracy fit. */
    const double q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4)));
    return((MagickRealType) ((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p));
#endif
  }
}

static MagickRealType Triangle(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    1st order (linear)
    B-Spline, bilinear interpolation, Tent 1D filter, or
    a Bartlett 2D Cone filter.  Also used as a Bartlett Windowing function
    for Sinc().
  */
  magick_unreferenced(resize_filter);
  if (x < 1.0)
    return(1.0-x);
  return(0.0);
}

static MagickRealType Welsh(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Welsh parabolic windowing filter.
  */
  magick_unreferenced(resize_filter);
  if (x < 1.0)
    return(1.0-x*x);
  return(0.0);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A c q u i r e   R e s i z e   F i l t e r                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireResizeFilter() allocates the ResizeFilter structure.  Choose from
%  these filters:
%
%  FIR (Finite impulse Response) Filters
%      Box         Triangle   Quadratic
%      Spline      Hermite    Catrom
%      Mitchell
%
%  IIR (Infinite impulse Response) Filters
%      Gaussian     Sinc        Jinc (Bessel)
%
%  Windowed Sinc/Jinc Filters
%      Blackman    Bohman     Lanczos
%      Hann        Hamming    Cosine
%      Kaiser      Welch      Parzen
%      Bartlett
%
%  Special Purpose Filters
%      Cubic  SincFast  LanczosSharp  Lanczos2  Lanczos2Sharp
%      Robidoux RobidouxSharp
%
%  The users "-filter" selection is used to lookup the default 'expert'
%  settings for that filter from a internal table.  However any provided
%  'expert' settings (see below) may override this selection.
%
%  FIR filters are used as is, and are limited to that filters support window
%  (unless over-ridden).  'Gaussian' while classed as an IIR filter, is also
%  simply clipped by its support size (currently 1.5 or approximately 3*sigma
%  as recommended by many references)
%
%  The special a 'cylindrical' filter flag will promote the default 4-lobed
%  Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is better
%  suited to this style of image resampling. This typically happens when using
%  such a filter for images distortions.
% % SPECIFIC FILTERS: % % Directly requesting 'Sinc', 'Jinc' function as a filter will force the use % of function without any windowing, or promotion for cylindrical usage. This % is not recommended, except by image processing experts, especially as part % of expert option filter function selection. % % Two forms of the 'Sinc' function are available: Sinc and SincFast. Sinc is % computed using the traditional sin(pi*x)/(pi*x); it is selected if the user % specifically specifies the use of a Sinc filter. SincFast uses highly % accurate (and fast) polynomial (low Q) and rational (high Q) approximations, % and will be used by default in most cases. % % The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter (promoted % to Jinc-windowed Jinc for cylindrical (Elliptical Weighted Average) use). % The Sinc version is the most popular windowed filter. % % LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form of % the Lanczos filter, specifically designed for EWA distortion (as a % Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos % (Sinc-Sinc) filter. The chosen blur value comes as close as possible to % satisfying the following condition without changing the character of the % corresponding EWA filter: % % 'No-Op' Vertical and Horizontal Line Preservation Condition: Images with % only vertical or horizontal features are preserved when performing 'no-op" % with EWA distortion. % % The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the Lanczos % filters. The 'sharp' version uses a blur factor of 0.9549963639785485, % again chosen because the resulting EWA filter comes as close as possible to % satisfying the above condition. % % Robidoux is another filter tuned for EWA. It is the Keys cubic filter % defined by B=(228 - 108 sqrt(2))/199. Robidoux satisfies the "'No-Op' % Vertical and Horizontal Line Preservation Condition" exactly, and it % moderately blurs high frequency 'pixel-hash' patterns under no-op. 
It turns
%  out to be close to both Mitchell and Lanczos2Sharp.  For example, its first
%  crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost the same as the
%  first crossing of Mitchell and Lanczos2Sharp.
%
%  RobidouxSharp is a slightly sharper version of Robidoux, some believe it
%  is too sharp.  It is designed to minimize the maximum possible change in
%  a pixel value which is at one of the extremes (e.g., 0 or 255) under no-op
%  conditions.  Amazingly Mitchell falls roughly between Robidoux and
%  RobidouxSharp, though this seems to have been pure coincidence.
%
%  'EXPERT' OPTIONS:
%
%  These artifact "defines" are not recommended for production use without
%  expert knowledge of resampling, filtering, and the effects they have on the
%  resulting resampled (resized or distorted) image.
%
%  They can be used to override any and all filter default, and it is
%  recommended you make good use of "filter:verbose" to make sure that the
%  overall effect of your selection (before and after) is as expected.
%
%    "filter:verbose"  controls whether to output the exact results of the
%       filter selections made, as well as plotting data for graphing the
%       resulting filter over the filters support range.
%
%    "filter:filter"  select the main function associated with this filter
%       name, as the weighting function of the filter.  This can be used to
%       set a windowing function as a weighting function, for special
%       purposes, such as graphing.
%
%       If a "filter:window" operation has not been provided, a 'Box'
%       windowing function will be set to denote that no windowing function is
%       being used.
%
%    "filter:window"  Select this windowing function for the filter. While any
%       filter could be used as a windowing function, using the 'first lobe' of
%       that filter over the whole support window, using a non-windowing
%       function is not advisable.  If no weighting filter function is specified
%       a 'SincFast' filter is used.
%
%    "filter:lobes"  Number of lobes to use for the Sinc/Jinc filter.
This a % simpler method of setting filter support size that will correctly % handle the Sinc/Jinc switch for an operators filtering requirements. % Only integers should be given. % % "filter:support" Set the support size for filtering to the size given. % This not recommended for Sinc/Jinc windowed filters (lobes should be % used instead). This will override any 'filter:lobes' option. % % "filter:win-support" Scale windowing function to this size instead. This % causes the windowing (or self-windowing Lagrange filter) to act is if % the support window it much much larger than what is actually supplied % to the calling operator. The filter however is still clipped to the % real support size given, by the support range supplied to the caller. % If unset this will equal the normal filter support size. % % "filter:blur" Scale the filter and support window by this amount. A value % of > 1 will generally result in a more blurred image with more ringing % effects, while a value <1 will sharpen the resulting image with more % aliasing effects. % % "filter:sigma" The sigma value to use for the Gaussian filter only. % Defaults to '1/2'. Using a different sigma effectively provides a % method of using the filter as a 'blur' convolution. Particularly when % using it for Distort. % % "filter:b" % "filter:c" Override the preset B,C values for a Cubic filter. % If only one of these are given it is assumes to be a 'Keys' type of % filter such that B+2C=1, where Keys 'alpha' value = C. % % Examples: % % Set a true un-windowed Sinc filter with 10 lobes (very slow): % -define filter:filter=Sinc % -define filter:lobes=8 % % Set an 8 lobe Lanczos (Sinc or Jinc) filter: % -filter Lanczos % -define filter:lobes=8 % % The format of the AcquireResizeFilter method is: % % ResizeFilter *AcquireResizeFilter(const Image *image, % const FilterTypes filter_type,const MagickBooleanType cylindrical, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
%
%    o filter: the filter type, defining a preset filter, window and support.
%      The artifact settings listed above will override those selections.
%
%    o blur: blur the filter by this amount, use 1.0 if unknown.  Image
%      artifact "filter:blur" will override this API call usage, including any
%      internal change (such as for cylindrical usage).
%
%    o radial: use a 1D orthogonal filter (Sinc) or 2D cylindrical (radial)
%      filter (Jinc).
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ResizeFilter *AcquireResizeFilter(const Image *image,
  const FilterTypes filter,const MagickRealType blur,
  const MagickBooleanType cylindrical,ExceptionInfo *exception)
{
  const char
    *artifact;

  FilterTypes
    filter_type,
    window_type;

  MagickRealType
    B,
    C,
    value;

  register ResizeFilter
    *resize_filter;

  /*
    Table mapping a given FilterTypes into weighting and windowing functions.
    A 'Box' windowing function means it is a simple non-windowed filter.  A
    'SincFast' filter function could be upgraded to a 'Jinc' filter if
    "cylindrical" is requested, unless a 'Sinc' or 'SincFast' filter was
    specifically requested by the user.

    WARNING: The order of this table must match the order of the FilterTypes
    enumeration specified in "resample.h", or the filter names will not match
    the filter being setup.  You can check filter setups with the
    "filter:verbose" expert setting.
  */
  static struct
  {
    FilterTypes
      filter,
      window;
  } const mapping[SentinelFilter] =
  {
    { UndefinedFilter,     BoxFilter },      /* Undefined (default to Box)  */
    { PointFilter,         BoxFilter },      /* SPECIAL: Nearest neighbour  */
    { BoxFilter,           BoxFilter },      /* Box averaging filter        */
    { TriangleFilter,      BoxFilter },      /* Linear interpolation filter */
    { HermiteFilter,       BoxFilter },      /* Hermite interpolation filter*/
    { SincFastFilter,      HanningFilter },  /* Hanning -- cosine-sinc      */
    { SincFastFilter,      HammingFilter },  /* Hamming --  ''    variation */
    { SincFastFilter,      BlackmanFilter }, /* Blackman -- 2*cosine-sinc   */
    { GaussianFilter,      BoxFilter },      /* Gaussian blur filter        */
    { QuadraticFilter,     BoxFilter },      /* Quadratic Gaussian approx   */
    { CubicFilter,         BoxFilter },      /* General Cubic Filter, Spline*/
    { CatromFilter,        BoxFilter },      /* Cubic-Keys interpolator     */
    { MitchellFilter,      BoxFilter },      /* 'Ideal' Cubic-Keys filter   */
    { JincFilter,          BoxFilter },      /* Raw 3-lobed Jinc function   */
    { SincFilter,          BoxFilter },      /* Raw 4-lobed Sinc function   */
    { SincFastFilter,      BoxFilter },      /* Raw fast sinc ("Pade"-type) */
    { SincFastFilter,      KaiserFilter },   /* Kaiser -- square root-sinc  */
    { LanczosFilter,       WelshFilter },    /* Welch -- parabolic (3 lobe) */
    { SincFastFilter,      CubicFilter },    /* Parzen -- cubic-sinc        */
    { SincFastFilter,      BohmanFilter },   /* Bohman -- 2*cosine-sinc     */
    { SincFastFilter,      TriangleFilter }, /* Bartlett -- triangle-sinc   */
    { LagrangeFilter,      BoxFilter },      /* Lagrange self-windowing     */
    { LanczosFilter,       LanczosFilter },  /* Lanczos Sinc-Sinc filters   */
    { LanczosSharpFilter,  LanczosSharpFilter }, /* | these require         */
    { Lanczos2Filter,      Lanczos2Filter },     /* | special handling      */
    { Lanczos2SharpFilter, Lanczos2SharpFilter },
    { RobidouxFilter,      BoxFilter },      /* Cubic Keys tuned for EWA    */
    { RobidouxSharpFilter, BoxFilter },      /* Sharper Cubic Keys for EWA  */
    { LanczosFilter,       CosineFilter },   /* Cosine window (3 lobes)     */
    { SplineFilter,        BoxFilter },      /* Spline Cubic Filter         */
    { LanczosRadiusFilter, LanczosFilter },  /* Lanczos with integer radius */
  };
  /*
    Table mapping the filter/window from the above table to an actual function.
    The default support size for that filter as a weighting function, the
    range to scale with to use that function as a sinc windowing function,
    (typically 1.0).

    Note that the filter_type -> function mapping is 1-to-1, except for the
    Sinc(), SincFast(), and CubicBC() functions, which may have multiple
    filter-to-function associations.  See "filter:verbose" handling below for
    the function -> filter mapping.
  */
  static struct
  {
    MagickRealType
      (*function)(const MagickRealType,const ResizeFilter*);

    double
      support,  /* Default lobes/support size of the weighting filter.      */
      scale,    /* Support when function used as a windowing function.
                   Typically equal to the location of the first zero
                   crossing.                                                */
      B,C;      /* BC-spline coefficients, ignored if not a CubicBC filter. */

    ResizeWeightingFunctionType
      weightingFunctionType;
  } const filters[SentinelFilter] =
  {
    /*            .--- support window (if used as a Weighting Function)
                  |    .--- first crossing (if used as a Windowing Function)
                  |    |     .--- B value for Cubic Function
                  |    |     |    .---- C value for Cubic Function
                  |    |     |    |                                         */
    { Box,       0.5, 0.5, 0.0, 0.0, BoxWeightingFunction },      /* Undefined (default to Box)  */
    { Box,       0.0, 0.5, 0.0, 0.0, BoxWeightingFunction },      /* Point (special handling)    */
    { Box,       0.5, 0.5, 0.0, 0.0, BoxWeightingFunction },      /* Box                         */
    { Triangle,  1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Triangle                    */
    { CubicBC,   1.0, 1.0, 0.0, 0.0, CubicBCWeightingFunction },  /* Hermite (cubic  B=C=0)      */
    { Hanning,   1.0, 1.0, 0.0, 0.0, HanningWeightingFunction },  /* Hann, cosine window         */
    { Hamming,   1.0, 1.0, 0.0, 0.0, HammingWeightingFunction },  /* Hamming, '' variation       */
    { Blackman,  1.0, 1.0, 0.0, 0.0, BlackmanWeightingFunction }, /* Blackman, 2*cosine window   */
    { Gaussian,  2.0, 1.5, 0.0, 0.0, GaussianWeightingFunction }, /* Gaussian                    */
    { Quadratic, 1.5, 1.5, 0.0, 0.0, QuadraticWeightingFunction },/* Quadratic gaussian          */
    { CubicBC,   2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction },  /* General Cubic Filter        */
    { CubicBC,   2.0, 1.0, 0.0, 0.5, CubicBCWeightingFunction },  /* Catmull-Rom    (B=0,C=1/2)  */
    { CubicBC,   2.0, 8.0/7.0, 1./3., 1./3., CubicBCWeightingFunction }, /* Mitchell   (B=C=1/3) */
    { Jinc,      3.0, 1.2196698912665045, 0.0, 0.0, JincWeightingFunction }, /* Raw 3-lobed Jinc */
    { Sinc,      4.0, 1.0, 0.0, 0.0, SincWeightingFunction },     /* Raw 4-lobed Sinc            */
    { SincFast,  4.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Raw fast sinc ("Pade"-type) */
    { Kaiser,    1.0, 1.0, 0.0, 0.0, KaiserWeightingFunction },   /* Kaiser (square root window) */
    { Welsh,     1.0, 1.0, 0.0, 0.0, WelshWeightingFunction },    /* Welsh (parabolic window)    */
    { CubicBC,   2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction },  /* Parzen (B-Spline window)    */
    { Bohman,    1.0, 1.0, 0.0, 0.0, BohmanWeightingFunction },   /* Bohman, 2*Cosine window     */
    { Triangle,  1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Bartlett (triangle window)  */
    { Lagrange,  2.0, 1.0, 0.0, 0.0, LagrangeWeightingFunction }, /* Lagrange sinc approximation */
    { SincFast,  3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 3-lobed Sinc-Sinc  */
    { SincFast,  3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Sharpened          */
    { SincFast,  2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 2-lobed            */
    { SincFast,  2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos2, sharpened         */
    /* Robidoux: Keys cubic close to Lanczos2D sharpened */
    { CubicBC,   2.0, 1.1685777620836932, 0.37821575509399867, 0.31089212245300067,
      CubicBCWeightingFunction },
    /* RobidouxSharp: Sharper version of Robidoux */
    { CubicBC,   2.0, 1.105822933719019, 0.2620145123990142, 0.3689927438004929,
      CubicBCWeightingFunction },
    { Cosine,    1.0, 1.0, 0.0, 0.0, CosineWeightingFunction },   /* Low level cosine window     */
    { CubicBC,   2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction },  /* Cubic B-Spline (B=1,C=0)    */
    { SincFast,  3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Integer Radius     */
  };
  /*
    The known zero crossings of the Jinc() (or more accurately the Jinc(x*PI))
    function being used as a filter.  It is used by the "filter:lobes" expert
    setting and for 'lobes' for Jinc functions in the previous table.  This
    way users do not have to deal with the highly irrational lobe sizes of the
    Jinc filter.

    Values taken from
      http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp
    using the Jv-function with v=1, then dividing by PI.
  */
  static double
    jinc_zeros[16] =
    {
      1.2196698912665045,
      2.2331305943815286,
      3.2383154841662362,
      4.2410628637960699,
      5.2427643768701817,
      6.2439216898644877,
      7.2447598687199570,
      8.2453949139520427,
      9.2458926849494673,
      10.246293348754916,
      11.246622794877883,
      12.246898461138105,
      13.247132522181061,
      14.247333735806849,
      15.247508563037300,
      16.247661874700962
    };

  /*
    Allocate resize filter.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(UndefinedFilter < filter && filter < SentinelFilter);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) exception;
  resize_filter=(ResizeFilter *) AcquireMagickMemory(sizeof(*resize_filter));
  if (resize_filter == (ResizeFilter *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(resize_filter,0,sizeof(*resize_filter));
  /*
    Defaults for the requested filter.
  */
  filter_type=mapping[filter].filter;
  window_type=mapping[filter].window;
  resize_filter->blur = blur;   /* function argument blur factor (1.0) */
  /* Promote 1D Windowed Sinc Filters to 2D Windowed Jinc filters */
  if ((cylindrical != MagickFalse) && (filter_type == SincFastFilter) &&
      (filter != SincFastFilter))
    filter_type=JincFilter;  /* 1D Windowed Sinc => 2D Windowed Jinc filters */

  /* Expert filter setting override */
  artifact=GetImageArtifact(image,"filter:filter");
  if (artifact != (const char *) NULL)
    {
      ssize_t
        option;

      option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
      if ((UndefinedFilter < option) && (option < SentinelFilter))
        {
          /* Raw filter request - no window function. */
          filter_type=(FilterTypes) option;
          window_type=BoxFilter;
        }
      /* Filter override with a specific window function. */
      artifact=GetImageArtifact(image,"filter:window");
      if (artifact != (const char *) NULL)
        {
          option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
          if ((UndefinedFilter < option) && (option < SentinelFilter))
            window_type=(FilterTypes) option;
        }
    }
  else
    {
      /* Window specified, but no filter function?  Assume Sinc/Jinc. */
      artifact=GetImageArtifact(image,"filter:window");
      if (artifact != (const char *) NULL)
        {
          ssize_t
            option;

          option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
          if ((UndefinedFilter < option) && (option < SentinelFilter))
            {
              filter_type=cylindrical != MagickFalse ? JincFilter :
                SincFastFilter;
              window_type=(FilterTypes) option;
            }
        }
    }
  /*
    Assign the real functions to use for the filters selected.
  */
  resize_filter->filter=filters[filter_type].function;
  resize_filter->support=filters[filter_type].support;
  resize_filter->filterWeightingType=filters[filter_type].weightingFunctionType;
  resize_filter->window=filters[window_type].function;
  resize_filter->windowWeightingType=filters[window_type].weightingFunctionType;
  resize_filter->scale=filters[window_type].scale;
  resize_filter->signature=MagickCoreSignature;

  /* Filter Modifications for orthogonal/cylindrical usage */
  if (cylindrical != MagickFalse)
    switch (filter_type)
    {
      case BoxFilter:
        /* Support for Cylindrical Box should be sqrt(2)/2 */
        resize_filter->support=(MagickRealType) MagickSQ1_2;
        break;
      case LanczosFilter:
      case LanczosSharpFilter:
      case Lanczos2Filter:
      case Lanczos2SharpFilter:
      case LanczosRadiusFilter:
        /* Lanczos variants become Jinc-windowed Jinc for cylindrical use. */
        resize_filter->filter=filters[JincFilter].function;
        resize_filter->window=filters[JincFilter].function;
        resize_filter->scale=filters[JincFilter].scale;
        /* number of lobes (support window size) remain unchanged */
        break;
      default:
        break;
    }
  /* Global Sharpening (regardless of orthogonal/cylindrical) */
  switch (filter_type)
  {
    case LanczosSharpFilter:
      resize_filter->blur *= (MagickRealType) 0.9812505644269356;
      break;
    case Lanczos2SharpFilter:
      resize_filter->blur *= (MagickRealType) 0.9549963639785485;
      break;
    /* case LanczosRadius: blur adjust is done after lobes */
    default:
      break;
  }

  /*
    Expert Option Modifications.
  */

  /* User Gaussian Sigma Override - no support change */
  if ((resize_filter->filter == Gaussian) ||
      (resize_filter->window == Gaussian) )
    {
      value=0.5;    /* Gaussian sigma default, half pixel */
      artifact=GetImageArtifact(image,"filter:sigma");
      if (artifact != (const char *) NULL)
        value=StringToDouble(artifact,(char **) NULL);
      /* Define coefficients for Gaussian */
      resize_filter->coefficient[0]=value;                 /* note sigma too */
      resize_filter->coefficient[1]=PerceptibleReciprocal(2.0*value*value);
        /* sigma scaling */
      resize_filter->coefficient[2]=PerceptibleReciprocal(Magick2PI*value*
        value);  /* normalization - not actually needed or used! */
      if ( value > 0.5 )
        resize_filter->support *= value/0.5;  /* increase support */
    }

  /* User Kaiser Alpha Override - no support change */
  if ((resize_filter->filter == Kaiser) ||
      (resize_filter->window == Kaiser) )
    {
      value=6.5; /* default beta value for Kaiser bessel windowing function */
      artifact=GetImageArtifact(image,"filter:alpha");  /* FUTURE: deprecate */
      if (artifact != (const char *) NULL)
        value=StringToDouble(artifact,(char **) NULL);
      artifact=GetImageArtifact(image,"filter:kaiser-beta");
      if (artifact != (const char *) NULL)
        value=StringToDouble(artifact,(char **) NULL);
      artifact=GetImageArtifact(image,"filter:kaiser-alpha");
      if (artifact != (const char *) NULL)
        value=(MagickRealType) (StringToDouble(artifact,(char **) NULL)*
          MagickPI);
      /* Define coefficients for Kaiser Windowing Function */
      resize_filter->coefficient[0]=value;         /* alpha */
      resize_filter->coefficient[1]=PerceptibleReciprocal(I0(value));
        /* normalization */
    }

  /* Support Overrides */
  artifact=GetImageArtifact(image,"filter:lobes");
  if (artifact != (const char *) NULL)
    {
      ssize_t
        lobes;

      lobes=(ssize_t) StringToLong(artifact);
      if (lobes < 1)
        lobes=1;
      resize_filter->support=(MagickRealType) lobes;
    }
  /* Convert a Jinc function lobes value to a real support value */
  if (resize_filter->filter == Jinc)
    {
      if (resize_filter->support > 16)
        resize_filter->support=jinc_zeros[15];  /* largest entry in table */
      else
        resize_filter->support=jinc_zeros[((long)resize_filter->support)-1];
      /* blur this filter so support is an integer value (lobes dependent) */
      if (filter_type == LanczosRadiusFilter)
        {
          resize_filter->blur *= floor(resize_filter->support)/
            resize_filter->support;
        }
    }
  /* Expert Blur Override */
  artifact=GetImageArtifact(image,"filter:blur");
  if (artifact != (const char *) NULL)
    resize_filter->blur*=StringToDouble(artifact,(char **) NULL);
  if (resize_filter->blur < MagickEpsilon)
    resize_filter->blur=(MagickRealType) MagickEpsilon;
  /* Expert override of the support setting */
  artifact=GetImageArtifact(image,"filter:support");
  if (artifact != (const char *) NULL)
    resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL));
  /*
    Scale windowing function separately to the support 'clipping' window that
    the calling operator is planning to actually use.  (Expert override)
  */
  resize_filter->window_support=resize_filter->support; /* default */
  artifact=GetImageArtifact(image,"filter:win-support");
  if (artifact != (const char *) NULL)
    resize_filter->window_support=fabs(StringToDouble(artifact,(char **)
      NULL));
  /*
    Adjust window function scaling to match windowing support for the
    weighting function.  This avoids a division on every filter call.
  */
  resize_filter->scale/=resize_filter->window_support;

  /*
    Set Cubic Spline B,C values, calculate Cubic coefficients.
  */
  B=0.0;
  C=0.0;
  if ((resize_filter->filter == CubicBC) ||
      (resize_filter->window == CubicBC) )
    {
      B=filters[filter_type].B;
      C=filters[filter_type].C;
      if (filters[window_type].function == CubicBC)
        {
          B=filters[window_type].B;
          C=filters[window_type].C;
        }
      artifact=GetImageArtifact(image,"filter:b");
      if (artifact != (const char *) NULL)
        {
          B=StringToDouble(artifact,(char **) NULL);
          C=(1.0-B)/2.0; /* Calculate C to get a Keys cubic filter. */
          artifact=GetImageArtifact(image,"filter:c"); /* user C override */
          if (artifact != (const char *) NULL)
            C=StringToDouble(artifact,(char **) NULL);
        }
      else
        {
          artifact=GetImageArtifact(image,"filter:c");
          if (artifact != (const char *) NULL)
            {
              C=StringToDouble(artifact,(char **) NULL);
              B=1.0-2.0*C; /* Calculate B to get a Keys cubic filter. */
            }
        }
      /*
        Convert B,C values into Cubic Coefficients.  See CubicBC().
      */
      {
        const double
          twoB = B+B;

        resize_filter->coefficient[0]=1.0-(1.0/3.0)*B;
        resize_filter->coefficient[1]=-3.0+twoB+C;
        resize_filter->coefficient[2]=2.0-1.5*B-C;
        resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C;
        resize_filter->coefficient[4]=-8.0*C-twoB;
        resize_filter->coefficient[5]=B+5.0*C;
        resize_filter->coefficient[6]=(-1.0/6.0)*B-C;
      }
    }

  /*
    Expert Option Request for verbose details of the resulting filter.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp master
  {
#endif
    artifact=GetImageArtifact(image,"filter:verbose");
    if (IsMagickTrue(artifact) != MagickFalse)
      {
        double
          support,
          x;

        /*
          Set the weighting function properly when the weighting function may
          not exactly match the filter of the same name.  EG: a Point filter
          really uses a Box weighting function with a different support than
          is typically used.
        */
        if (resize_filter->filter == Box)       filter_type=BoxFilter;
        if (resize_filter->filter == Sinc)      filter_type=SincFilter;
        if (resize_filter->filter == SincFast)  filter_type=SincFastFilter;
        if (resize_filter->filter == Jinc)      filter_type=JincFilter;
        if (resize_filter->filter == CubicBC)   filter_type=CubicFilter;
        if (resize_filter->window == Box)       window_type=BoxFilter;
        if (resize_filter->window == Sinc)      window_type=SincFilter;
        if (resize_filter->window == SincFast)  window_type=SincFastFilter;
        if (resize_filter->window == Jinc)      window_type=JincFilter;
        if (resize_filter->window == CubicBC)   window_type=CubicFilter;
        /*
          Report Filter Details.
        */
        support=GetResizeFilterSupport(resize_filter);  /* practical_support */
        (void) FormatLocaleFile(stdout,
          "# Resampling Filter (for graphing)\n#\n");
        (void) FormatLocaleFile(stdout,"# filter = %s\n",
          CommandOptionToMnemonic(MagickFilterOptions,filter_type));
        (void) FormatLocaleFile(stdout,"# window = %s\n",
          CommandOptionToMnemonic(MagickFilterOptions,window_type));
        (void) FormatLocaleFile(stdout,"# support = %.*g\n",
          GetMagickPrecision(),(double) resize_filter->support);
        (void) FormatLocaleFile(stdout,"# window-support = %.*g\n",
          GetMagickPrecision(),(double) resize_filter->window_support);
        (void) FormatLocaleFile(stdout,"# scale-blur = %.*g\n",
          GetMagickPrecision(), (double)resize_filter->blur);
        if ( filter_type == GaussianFilter || window_type == GaussianFilter )
          (void) FormatLocaleFile(stdout,"# gaussian-sigma = %.*g\n",
            GetMagickPrecision(), (double)resize_filter->coefficient[0]);
        if ( filter_type == KaiserFilter || window_type == KaiserFilter )
          (void) FormatLocaleFile(stdout,"# kaiser-beta = %.*g\n",
            GetMagickPrecision(), (double)resize_filter->coefficient[0]);
        (void) FormatLocaleFile(stdout,"# practical-support = %.*g\n",
          GetMagickPrecision(), (double)support);
        if ( filter_type == CubicFilter || window_type == CubicFilter )
          (void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n",
            GetMagickPrecision(),(double)B, GetMagickPrecision(),(double)C);
        (void) FormatLocaleFile(stdout,"\n");
        /*
          Output values of resulting filter graph -- for graphing the filter
          result.
        */
        for (x=0.0; x <= support; x+=0.01f)
          (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x,
            GetMagickPrecision(),
            (double) GetResizeFilterWeight(resize_filter,x));
        /*
          A final value so gnuplot can graph the 'stop' properly.
        */
        (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support,
          GetMagickPrecision(),0.0);
      }
    /*
      Output the above once only for each image - remove setting.
      NOTE(review): this casts away const on 'image' to delete the artifact;
      the function otherwise treats the image as read-only.
    */
    (void) DeleteImageArtifact((Image *) image,"filter:verbose");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  }
#endif
  return(resize_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A d a p t i v e R e s i z e I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveResizeImage() adaptively resize image with pixel resampling.
%
%  This is a shortcut function for a fast interpolative resize using mesh
%  interpolation.  It works well for small resizes of less than +/- 50%
%  of the original image size.  For larger resizing on images a full
%  filtered and slower resize function should be used instead.
%
%  The format of the AdaptiveResizeImage method is:
%
%      Image *AdaptiveResizeImage(const Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the resized image.
%
%    o rows: the number of rows in the resized image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  /* Thin wrapper: interpolative resize using mesh interpolation. */
  return(InterpolativeResizeImage(image,columns,rows,MeshInterpolatePixel,
    exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   B e s s e l O r d e r O n e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BesselOrderOne() computes the Bessel function of x of the first kind of
%  order 1 (the code below returns x*J1(x), where J1 approximates j1(x)/x).
%  This is used to create the Jinc() filter function below.
%
%  Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
%     j1(x) = x*j1(x);
%
%  For x in (8,inf)
%
%     j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
%  where x1 = x-3*pi/4.  Compute sin(x1) and cos(x1) as follows:
%
%     cos(x1) =  cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
%             =  1/sqrt(2) * (sin(x) - cos(x))
%     sin(x1) =  sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
%             = -1/sqrt(2) * (sin(x) + cos(x))
%
%  The format of the BesselOrderOne method is:
%
%      MagickRealType BesselOrderOne(MagickRealType x)
%
%  A description of each parameter follows:
%
%    o x: MagickRealType value.
%
*/

#undef I0
/*
  I0(): series expansion whose terms are all positive -- this is the
  *modified* Bessel function of the first kind, order 0.  It is used above to
  normalize the Kaiser windowing function.  Iterates until the next term
  drops below MagickEpsilon.
*/
static MagickRealType I0(MagickRealType x)
{
  MagickRealType
    sum,
    t,
    y;

  register ssize_t
    i;

  /*
    Zeroth order (modified) Bessel function of the first kind.
  */
  sum=1.0;
  y=x*x/4.0;
  t=y;
  for (i=2; t > MagickEpsilon; i++)
  {
    sum+=t;
    t*=y/((MagickRealType) i*i);
  }
  return(sum);
}

#undef J1
/*
  J1(): rational (polynomial quotient) approximation evaluated by Horner's
  rule; used by BesselOrderOne() for x in (0,8].
*/
static MagickRealType J1(MagickRealType x)
{
  MagickRealType
    p,
    q;

  register ssize_t
    i;

  static const double
    Pone[] =
    {
       0.581199354001606143928050809e+21,
      -0.6672106568924916298020941484e+20,
       0.2316433580634002297931815435e+19,
      -0.3588817569910106050743641413e+17,
       0.2908795263834775409737601689e+15,
      -0.1322983480332126453125473247e+13,
       0.3413234182301700539091292655e+10,
      -0.4695753530642995859767162166e+7,
       0.270112271089232341485679099e+4
    },
    Qone[] =
    {
      0.11623987080032122878585294e+22,
      0.1185770712190320999837113348e+20,
      0.6092061398917521746105196863e+17,
      0.2081661221307607351240184229e+15,
      0.5243710262167649715406728642e+12,
      0.1013863514358673989967045588e+10,
      0.1501793594998585505921097578e+7,
      0.1606931573481487801970916749e+4,
      0.1e+1
    };

  p=Pone[8];
  q=Qone[8];
  for (i=7; i >= 0; i--)
  {
    p=p*x*x+Pone[i];
    q=q*x*x+Qone[i];
  }
  return(p/q);
}

#undef P1
/*
  P1(): rational approximation in (8.0/x)^2; the p1() term of the asymptotic
  expansion used by BesselOrderOne() for x in (8,inf).
*/
static MagickRealType P1(MagickRealType x)
{
  MagickRealType
    p,
    q;

  register ssize_t
    i;

  static const double
    Pone[] =
    {
      0.352246649133679798341724373e+5,
      0.62758845247161281269005675e+5,
      0.313539631109159574238669888e+5,
      0.49854832060594338434500455e+4,
      0.2111529182853962382105718e+3,
      0.12571716929145341558495e+1
    },
    Qone[] =
    {
      0.352246649133679798068390431e+5,
      0.626943469593560511888833731e+5,
      0.312404063819041039923015703e+5,
      0.4930396490181088979386097e+4,
      0.2030775189134759322293574e+3,
      0.1e+1
    };

  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*(8.0/x)*(8.0/x)+Pone[i];
    q=q*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(p/q);
}

#undef Q1
/*
  Q1(): rational approximation in (8.0/x)^2; the q1() term of the asymptotic
  expansion used by BesselOrderOne() for x in (8,inf).
*/
static MagickRealType Q1(MagickRealType x)
{
  MagickRealType
    p,
    q;

  register ssize_t
    i;

  static const double
    Pone[] =
    {
      0.3511751914303552822533318e+3,
      0.7210391804904475039280863e+3,
      0.4259873011654442389886993e+3,
      0.831898957673850827325226e+2,
      0.45681716295512267064405e+1,
      0.3532840052740123642735e-1
    },
    Qone[] =
    {
      0.74917374171809127714519505e+4,
      0.154141773392650970499848051e+5,
      0.91522317015169922705904727e+4,
      0.18111867005523513506724158e+4,
      0.1038187585462133728776636e+3,
      0.1e+1
    };

  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*(8.0/x)*(8.0/x)+Pone[i];
    q=q*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(p/q);
}

static MagickRealType BesselOrderOne(MagickRealType x)
{
  MagickRealType
    p,
    q;

  if (x == 0.0)
    return(0.0);
  p=x;  /* remember the sign: j1(-x) = -j1(x) */
  if (x < 0.0)
    x=(-x);
  if (x < 8.0)
    return(p*J1(x));
  /* asymptotic form for large |x|, see the reduction notes above */
  q=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin((double) x)-
    cos((double) x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin((double) x)+
    cos((double) x))));
  if (p < 0.0)
    q=(-q);
  return(q);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y R e s i z e F i l t e r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyResizeFilter() destroy the resize filter.
%
%  The format of the DestroyResizeFilter method is:
%
%      ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
%  A description of each parameter follows:
%
%    o resize_filter: the resize filter.
%
*/
MagickExport ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  /* invalidate the signature to catch use-after-free of this filter */
  resize_filter->signature=(~MagickCoreSignature);
  resize_filter=(ResizeFilter *) RelinquishMagickMemory(resize_filter);
  return(resize_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t R e s i z e F i l t e r S u p p o r t                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterSupport() return the current support window size for this
%  filter.  Note that this may have been enlarged by filter:blur factor.
%
%  The format of the GetResizeFilterSupport method is:
%
%      MagickRealType GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
%  A description of each parameter follows:
%
%    o filter: Image filter to use.
%
*/

/* Accessor: raw pointer to the filter's coefficient array. */
MagickExport MagickRealType *GetResizeFilterCoefficient(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return((MagickRealType *) resize_filter->coefficient);
}

/* Accessor: the filter's blur (scaling) factor. */
MagickExport MagickRealType GetResizeFilterBlur(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->blur);
}

/* Accessor: the windowing function scale factor. */
MagickExport MagickRealType GetResizeFilterScale(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->scale);
}

/* Accessor: the support of the windowing function. */
MagickExport MagickRealType GetResizeFilterWindowSupport(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->window_support);
}

/* Accessor: which weighting function the filter uses. */
MagickExport ResizeWeightingFunctionType GetResizeFilterWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->filterWeightingType);
}

/* Accessor: which weighting function the window uses. */
MagickExport ResizeWeightingFunctionType GetResizeFilterWindowWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->windowWeightingType);
}

/* Practical support: the configured support enlarged by the blur factor. */
MagickExport MagickRealType GetResizeFilterSupport(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->support*resize_filter->blur);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t R e s i z e F i l t e r W e i g h t                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterWeight evaluates the specified resize filter at the point x
%  which usually lies between zero and the filter's current 'support' and
%  returns the weight of the filter function at that point.
%
%  The format of the GetResizeFilterWeight method is:
%
%      MagickRealType GetResizeFilterWeight(const ResizeFilter *resize_filter,
%        const MagickRealType x)
%
%  A description of each parameter follows:
%
%    o filter: the filter type.
%
%    o x: the point.
%
*/
MagickExport MagickRealType GetResizeFilterWeight(
  const ResizeFilter *resize_filter,const MagickRealType x)
{
  MagickRealType
    scale,
    weight,
    x_blur;

  /*
    Windowing function - scale the weighting filter by this amount.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  x_blur=fabs((double) x)/resize_filter->blur;  /* X offset with blur scaling */
  if ((resize_filter->window_support < MagickEpsilon) ||
      (resize_filter->window == Box))
    scale=1.0;  /* Point or Box Filter -- avoid division by zero */
  else
    {
      scale=resize_filter->scale;
      scale=resize_filter->window(x_blur*scale,resize_filter);
    }
  /* final weight = window(x) * filter(x) */
  weight=scale*resize_filter->filter(x_blur,resize_filter);
  return(weight);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n t e r p o l a t i v e R e s i z e I m a g e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InterpolativeResizeImage() resizes an image using the specified
%  interpolation method.
%
%  The format of the InterpolativeResizeImage method is:
%
%      Image *InterpolativeResizeImage(const Image *image,const size_t columns,
%        const size_t rows,const InterpolatePixelMethod method,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the resized image.
%
%    o rows: the number of rows in the resized image.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *InterpolativeResizeImage(const Image *image,
  const size_t columns,const size_t rows,const InterpolatePixelMethod method,
  ExceptionInfo *exception)
{
#define InterpolativeResizeImageTag  "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  Image
    *resize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    scale;

  ssize_t
    y;

  /*
    Interpolatively resize image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /* no-op resize: return a clone */
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(resize_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&resize_image->exception);
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
  /* source-to-destination coordinate scaling */
  scale.x=(double) image->columns/resize_image->columns;
  scale.y=(double) image->rows/resize_image->rows;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    PointInfo
      offset;

    register IndexPacket
      *magick_restrict resize_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      continue;
    resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view);
    GetMagickPixelPacket(image,&pixel);
    /* pixel-center aligned source coordinate */
    offset.y=((MagickRealType) y+0.5)*scale.y-0.5;
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      offset.x=((MagickRealType) x+0.5)*scale.x-0.5;
      status=InterpolateMagickPixelPacket(image,image_view,method,offset.x,
        offset.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      SetPixelPacket(resize_image,&pixel,q,resize_indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      continue;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_InterpolativeResizeImage)
#endif
        proceed=SetImageProgress(image,InterpolativeResizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    resize_image=DestroyImage(resize_image);
  return(resize_image);
}

#if defined(MAGICKCORE_LQR_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   L i q u i d R e s c a l e I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LiquidRescaleImage() rescales image with seam carving.
%
%  The format of the LiquidRescaleImage method is:
%
%      Image *LiquidRescaleImage(const Image *image,
%        const size_t columns,const size_t rows,
%        const double delta_x,const double rigidity,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the rescaled image.
%
%    o rows: the number of rows in the rescaled image.
%
%    o delta_x: maximum seam transversal step (0 means straight seams).
%
%    o rigidity: introduce a bias for non-straight seams (typically 0).
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns,
  const size_t rows,const double delta_x,const double rigidity,
  ExceptionInfo *exception)
{
#define LiquidRescaleImageTag  "Rescale/Image"

  CacheView
    *rescale_view;

  const char
    *map;

  guchar
    *packet;

  Image
    *rescale_image;

  int
    x,
    y;

  LqrCarver
    *carver;

  LqrRetVal
    lqr_status;

  MagickBooleanType
    status;

  MagickPixelPacket
    pixel;

  MemoryInfo
    *pixel_info;

  unsigned char
    *pixels;

  /*
    Liquid rescale image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /* no-op rescale: return a clone */
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /* seam carving needs more than 2 rows/columns; fall back to ResizeImage */
  if ((columns <= 2) || (rows <= 2))
    return(ResizeImage(image,columns,rows,image->filter,image->blur,
      exception));
  /* select an export/import channel map matching alpha and colorspace */
  map="RGB";
  if (image->matte != MagickFalse)
    map="RGBA";
  if (image->colorspace == CMYKColorspace)
    {
      map="CMYK";
      if (image->matte != MagickFalse)
        map="CMYKA";
    }
  pixel_info=AcquireVirtualMemory(image->columns,image->rows*strlen(map)*
    sizeof(*pixels));
  if (pixel_info == (MemoryInfo *) NULL)
    return((Image *) NULL);
  pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
  status=ExportImagePixels(image,0,0,image->columns,image->rows,map,CharPixel,
    pixels,exception);
  if (status == MagickFalse)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  carver=lqr_carver_new(pixels,(int) image->columns,(int) image->rows,
    (int) strlen(map));
  if (carver == (LqrCarver *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  lqr_carver_set_preserve_input_image(carver);
  /*
    NOTE(review): the status of lqr_carver_init() is overwritten by
    lqr_carver_resize() without being checked; both results are ultimately
    discarded via (void) lqr_status.
  */
  lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity);
  lqr_status=lqr_carver_resize(carver,(int) columns,(int) rows);
  (void) lqr_status;
  rescale_image=CloneImage(image,lqr_carver_get_width(carver),
    lqr_carver_get_height(carver),MagickTrue,exception);
  if (rescale_image == (Image *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(rescale_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&rescale_image->exception);
      rescale_image=DestroyImage(rescale_image);
      return((Image *) NULL);
    }
  GetMagickPixelPacket(rescale_image,&pixel);
  (void) lqr_carver_scan_reset(carver);
  rescale_view=AcquireAuthenticCacheView(rescale_image,exception);
  /* copy carver output back one pixel at a time */
  while (lqr_carver_scan(carver,&x,&y,&packet) != 0)
  {
    register IndexPacket
      *magick_restrict rescale_indexes;

    register PixelPacket
      *magick_restrict q;

    q=QueueCacheViewAuthenticPixels(rescale_view,x,y,1,1,exception);
    if (q == (PixelPacket *) NULL)
      break;
    rescale_indexes=GetCacheViewAuthenticIndexQueue(rescale_view);
    /* carver packets are 8-bit; scale to the quantum range */
    pixel.red=QuantumRange*(packet[0]/255.0);
    pixel.green=QuantumRange*(packet[1]/255.0);
    pixel.blue=QuantumRange*(packet[2]/255.0);
    if (image->colorspace != CMYKColorspace)
      {
        if (image->matte != MagickFalse)
          pixel.opacity=QuantumRange-QuantumRange*(packet[3]/255.0);
      }
    else
      {
        pixel.index=QuantumRange*(packet[3]/255.0);
        if (image->matte != MagickFalse)
          pixel.opacity=QuantumRange-QuantumRange*(packet[4]/255.0);
      }
    SetPixelPacket(rescale_image,&pixel,q,rescale_indexes);
    if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse)
      break;
  }
  rescale_view=DestroyCacheView(rescale_view);
  /*
    Relinquish resources.
  */
  pixel_info=RelinquishVirtualMemory(pixel_info);
  lqr_carver_destroy(carver);
  return(rescale_image);
}
#else
/* Stub used when the liblqr delegate is not built in: raises an exception. */
MagickExport Image *LiquidRescaleImage(const Image *image,
  const size_t magick_unused(columns),const size_t magick_unused(rows),
  const double magick_unused(delta_x),const double magick_unused(rigidity),
  ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
    "DelegateLibrarySupportNotBuiltIn","`%s' (LQR)",image->filename);
  return((Image *) NULL);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g n i f y I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagnifyImage() doubles the size of the image with a pixel art scaling
%  algorithm.
%
%  The format of the MagnifyImage method is:
%
%      Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
{
#define MagnifyImageTag  "Magnify/Image"

  CacheView
    *image_view,
    *magnify_view;

  Image
    *magnify_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize magnified image attributes.
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); magnify_image=CloneImage(image,2*image->columns,2*image->rows,MagickTrue, exception); if (magnify_image == (Image *) NULL) return((Image *) NULL); /* Magnify image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); magnify_view=AcquireAuthenticCacheView(magnify_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,magnify_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict magnify_indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(magnify_view,0,2*y,magnify_image->columns,2, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } magnify_indexes=GetCacheViewAuthenticIndexQueue(magnify_view); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType intensity[9]; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register PixelPacket *magick_restrict r; register ssize_t i; /* Magnify this row of pixels. */ p=GetCacheViewVirtualPixels(image_view,x-1,y-1,3,3,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); for (i=0; i < 9; i++) intensity[i]=GetPixelIntensity(image,p+i); r=q; if ((fabs(intensity[1]-intensity[7]) < MagickEpsilon) || (fabs(intensity[3]-intensity[5]) < MagickEpsilon)) { /* Clone center pixel. */ *r=p[4]; r++; *r=p[4]; r+=(magnify_image->columns-1); *r=p[4]; r++; *r=p[4]; } else { /* Selectively clone pixel. 
*/ if (fabs(intensity[1]-intensity[3]) < MagickEpsilon) *r=p[3]; else *r=p[4]; r++; if (fabs(intensity[1]-intensity[5]) < MagickEpsilon) *r=p[5]; else *r=p[4]; r+=(magnify_image->columns-1); if (fabs(intensity[3]-intensity[7]) < MagickEpsilon) *r=p[3]; else *r=p[4]; r++; if (fabs(intensity[5]-intensity[7]) < MagickEpsilon) *r=p[5]; else *r=p[4]; } if (indexes != (const IndexPacket *) NULL) { register IndexPacket *r; /* Magnify the colormap indexes. */ r=magnify_indexes; if ((fabs(intensity[1]-intensity[7]) < MagickEpsilon) || (fabs(intensity[3]-intensity[5]) < MagickEpsilon)) { /* Clone center pixel. */ *r=indexes[4]; r++; *r=indexes[4]; r+=(magnify_image->columns-1); *r=indexes[4]; r++; *r=indexes[4]; } else { /* Selectively clone pixel. */ if (fabs(intensity[1]-intensity[3]) < MagickEpsilon) *r=indexes[3]; else *r=indexes[4]; r++; if (fabs(intensity[1]-intensity[5]) < MagickEpsilon) *r=indexes[5]; else *r=indexes[4]; r+=(magnify_image->columns-1); if (fabs(intensity[3]-intensity[7]) < MagickEpsilon) *r=indexes[3]; else *r=indexes[4]; r++; if (fabs(intensity[5]-intensity[7]) < MagickEpsilon) *r=indexes[5]; else *r=indexes[4]; } magnify_indexes+=2; } q+=2; } if (SyncCacheViewAuthenticPixels(magnify_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MagnifyImage) #endif proceed=SetImageProgress(image,MagnifyImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } magnify_view=DestroyCacheView(magnify_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) magnify_image=DestroyImage(magnify_image); return(magnify_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M i n i f y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MinifyImage() is a 
convenience method that scales an image proportionally to % half its size. % % The format of the MinifyImage method is: % % Image *MinifyImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception) { Image *minify_image; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); minify_image=ResizeImage(image,image->columns/2,image->rows/2,SplineFilter, 1.0,exception); return(minify_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s a m p l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResampleImage() resize image in terms of its pixel size, so that when % displayed at the given resolution it will be the same size in terms of % real world units as the original image at the original resolution. % % The format of the ResampleImage method is: % % Image *ResampleImage(Image *image,const double x_resolution, % const double y_resolution,const FilterTypes filter,const double blur, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image to be resized to fit the given resolution. % % o x_resolution: the new image x resolution. % % o y_resolution: the new image y resolution. % % o filter: Image filter to use. % % o blur: the blur factor where > 1 is blurry, < 1 is sharp. 
% */
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
  const double y_resolution,const FilterTypes filter,const double blur,
  ExceptionInfo *exception)
{
#define ResampleImageTag  "Resample/Image"

  double
    x_density,
    y_density;

  Image
    *resample_image;

  size_t
    height,
    width;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Fall back to 72 DPI on either axis when the source image carries no
    resolution, then scale the pixel dimensions so the real-world size is
    preserved at the requested resolution.
  */
  x_density=image->x_resolution == 0.0 ? 72.0 : image->x_resolution;
  y_density=image->y_resolution == 0.0 ? 72.0 : image->y_resolution;
  width=(size_t) (x_resolution*image->columns/x_density+0.5);
  height=(size_t) (y_resolution*image->rows/y_density+0.5);
  resample_image=ResizeImage(image,width,height,filter,blur,exception);
  if (resample_image != (Image *) NULL)
    {
      resample_image->x_resolution=x_resolution;
      resample_image->y_resolution=y_resolution;
    }
  return(resample_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s i z e   I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResizeImage() scales an image to the desired dimensions, using the given
%  filter (see AcquireFilterInfo()).
%
%  If an undefined filter is given the filter defaults to Mitchell for a
%  colormapped image, a image with a matte channel, or if the image is
%  enlarged.  Otherwise the filter defaults to a Lanczos.
%
%  ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
%  The format of the ResizeImage method is:
%
%      Image *ResizeImage(Image *image,const size_t columns,
%        const size_t rows,const FilterTypes filter,const double blur,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the scaled image.
%
%    o rows: the number of rows in the scaled image.
%
%    o filter: Image filter to use.
%
%    o blur: the blur factor where > 1 is blurry, < 1 is sharp.  Typically set
%      this to 1.0.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  One filter tap: a source pixel index and the filter weight applied to it
  when accumulating a destination pixel.
*/
typedef struct _ContributionInfo
{
  MagickRealType
    weight;

  ssize_t
    pixel;
} ContributionInfo;

/*
  Release a per-thread array of contribution buffers previously obtained from
  AcquireContributionThreadSet().  Safe on a partially-populated set: unused
  slots are NULL (the set is zeroed on acquisition) and are skipped.
  Always returns NULL so callers can write `set=DestroyContributionThreadSet(set);`.
*/
static ContributionInfo **DestroyContributionThreadSet(
  ContributionInfo **contribution)
{
  register ssize_t
    i;

  assert(contribution != (ContributionInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (contribution[i] != (ContributionInfo *) NULL)
      contribution[i]=(ContributionInfo *) RelinquishAlignedMemory(
        contribution[i]);
  contribution=(ContributionInfo **) RelinquishMagickMemory(contribution);
  return(contribution);
}

/*
  Allocate one aligned contribution buffer (of `count` entries) per worker
  thread, so each OpenMP thread in the filter loops can fill its own buffer
  without locking.  Returns NULL on any allocation failure, after releasing
  whatever was already allocated.  The caller owns the set and must free it
  with DestroyContributionThreadSet().
*/
static ContributionInfo **AcquireContributionThreadSet(const size_t count)
{
  register ssize_t
    i;

  ContributionInfo
    **contribution;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*contribution));
  if (contribution == (ContributionInfo **) NULL)
    return((ContributionInfo **) NULL);
  /* Zero first so a mid-loop failure leaves only NULL/valid slots behind. */
  (void) memset(contribution,0,number_threads*sizeof(*contribution));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    contribution[i]=(ContributionInfo *) MagickAssumeAligned(
      AcquireAlignedMemory(count,sizeof(**contribution)));
    if (contribution[i] == (ContributionInfo *) NULL)
      return(DestroyContributionThreadSet(contribution));
  }
  return(contribution);
}

/*
  Resize `image` horizontally by `x_factor` into `resize_image`, applying
  `resize_filter` along each row; `span`/`offset` drive progress reporting
  shared with the vertical pass.  (Body continues below.)
*/
static MagickBooleanType HorizontalFilter(const ResizeFilter *resize_filter,
  const Image *image,Image *resize_image,const MagickRealType x_factor,
  const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
#define ResizeImageTag  "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  MagickRealType
scale, support; ssize_t x; /* Apply filter to resize horizontally from image to resize image. */ scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0); support=scale*GetResizeFilterSupport(resize_filter); storage_class=support > 0.5 ? DirectClass : image->storage_class; if (SetImageStorageClass(resize_image,storage_class) == MagickFalse) { InheritException(exception,&resize_image->exception); return(MagickFalse); } if (support < 0.5) { /* Support too small even for nearest neighbour: Reduce to point sampling. */ support=(MagickRealType) 0.5; scale=1.0; } contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0)); if (contributions == (ContributionInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } status=MagickTrue; scale=PerceptibleReciprocal(scale); (void) memset(&zero,0,sizeof(zero)); image_view=AcquireVirtualCacheView(image,exception); resize_view=AcquireAuthenticCacheView(resize_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,resize_image,resize_image->columns,1) #endif for (x=0; x < (ssize_t) resize_image->columns; x++) { const int id = GetOpenMPThreadId(); MagickRealType bisect, density; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ContributionInfo *magick_restrict contribution; register IndexPacket *magick_restrict resize_indexes; register PixelPacket *magick_restrict q; register ssize_t y; ssize_t n, start, stop; if (status == MagickFalse) continue; bisect=(MagickRealType) (x+0.5)/x_factor+MagickEpsilon; start=(ssize_t) MagickMax(bisect-support+0.5,0.0); stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns); density=0.0; contribution=contributions[id]; for (n=0; n < (stop-start); n++) { contribution[n].pixel=start+n; 
contribution[n].weight=GetResizeFilterWeight(resize_filter,scale* ((MagickRealType) (start+n)-bisect+0.5)); density+=contribution[n].weight; } if (n == 0) continue; if ((density != 0.0) && (density != 1.0)) { register ssize_t i; /* Normalize. */ density=PerceptibleReciprocal(density); for (i=0; i < n; i++) contribution[i].weight*=density; } p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception); q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view); for (y=0; y < (ssize_t) resize_image->rows; y++) { MagickPixelPacket pixel; MagickRealType alpha; register ssize_t i; ssize_t j; pixel=zero; if (image->matte == MagickFalse) { for (i=0; i < n; i++) { j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[i].pixel-contribution[0].pixel); alpha=contribution[i].weight; pixel.red+=alpha*GetPixelRed(p+j); pixel.green+=alpha*GetPixelGreen(p+j); pixel.blue+=alpha*GetPixelBlue(p+j); pixel.opacity+=alpha*GetPixelOpacity(p+j); } SetPixelRed(q,ClampToQuantum(pixel.red)); SetPixelGreen(q,ClampToQuantum(pixel.green)); SetPixelBlue(q,ClampToQuantum(pixel.blue)); SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if ((image->colorspace == CMYKColorspace) && (resize_image->colorspace == CMYKColorspace)) { for (i=0; i < n; i++) { j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[i].pixel-contribution[0].pixel); alpha=contribution[i].weight; pixel.index+=alpha*GetPixelIndex(indexes+j); } SetPixelIndex(resize_indexes+y,ClampToQuantum(pixel.index)); } } else { double gamma; gamma=0.0; for (i=0; i < n; i++) { j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[i].pixel-contribution[0].pixel); 
alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j); pixel.red+=alpha*GetPixelRed(p+j); pixel.green+=alpha*GetPixelGreen(p+j); pixel.blue+=alpha*GetPixelBlue(p+j); pixel.opacity+=contribution[i].weight*GetPixelOpacity(p+j); gamma+=alpha; } gamma=PerceptibleReciprocal(gamma); SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if ((image->colorspace == CMYKColorspace) && (resize_image->colorspace == CMYKColorspace)) { for (i=0; i < n; i++) { j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[i].pixel-contribution[0].pixel); alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j); pixel.index+=alpha*GetPixelIndex(indexes+j); } SetPixelIndex(resize_indexes+y,ClampToQuantum(gamma*pixel.index)); } } if ((resize_image->storage_class == PseudoClass) && (image->storage_class == PseudoClass)) { i=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double) stop- 1.0)+0.5); j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[i-start].pixel-contribution[0].pixel); SetPixelIndex(resize_indexes+y,GetPixelIndex(indexes+j)); } q++; } if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_HorizontalFilter) #endif proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span); if (proceed == MagickFalse) status=MagickFalse; } } resize_view=DestroyCacheView(resize_view); image_view=DestroyCacheView(image_view); contributions=DestroyContributionThreadSet(contributions); return(status); } static MagickBooleanType VerticalFilter(const ResizeFilter *resize_filter, const Image *image,Image *resize_image,const MagickRealType y_factor, const MagickSizeType span,MagickOffsetType 
*offset,ExceptionInfo *exception) { CacheView *image_view, *resize_view; ClassType storage_class; ContributionInfo **magick_restrict contributions; MagickBooleanType status; MagickPixelPacket zero; MagickRealType scale, support; ssize_t y; /* Apply filter to resize vertically from image to resize image. */ scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0); support=scale*GetResizeFilterSupport(resize_filter); storage_class=support > 0.5 ? DirectClass : image->storage_class; if (SetImageStorageClass(resize_image,storage_class) == MagickFalse) { InheritException(exception,&resize_image->exception); return(MagickFalse); } if (support < 0.5) { /* Support too small even for nearest neighbour: Reduce to point sampling. */ support=(MagickRealType) 0.5; scale=1.0; } contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0)); if (contributions == (ContributionInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } status=MagickTrue; scale=PerceptibleReciprocal(scale); (void) memset(&zero,0,sizeof(zero)); image_view=AcquireVirtualCacheView(image,exception); resize_view=AcquireAuthenticCacheView(resize_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,resize_image,resize_image->rows,1) #endif for (y=0; y < (ssize_t) resize_image->rows; y++) { const int id = GetOpenMPThreadId(); MagickRealType bisect, density; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ContributionInfo *magick_restrict contribution; register IndexPacket *magick_restrict resize_indexes; register PixelPacket *magick_restrict q; register ssize_t x; ssize_t n, start, stop; if (status == MagickFalse) continue; bisect=(MagickRealType) (y+0.5)/y_factor+MagickEpsilon; start=(ssize_t) MagickMax(bisect-support+0.5,0.0); stop=(ssize_t) 
MagickMin(bisect+support+0.5,(double) image->rows); density=0.0; contribution=contributions[id]; for (n=0; n < (stop-start); n++) { contribution[n].pixel=start+n; contribution[n].weight=GetResizeFilterWeight(resize_filter,scale* ((MagickRealType) (start+n)-bisect+0.5)); density+=contribution[n].weight; } if (n == 0) continue; if ((density != 0.0) && (density != 1.0)) { register ssize_t i; /* Normalize. */ density=PerceptibleReciprocal(density); for (i=0; i < n; i++) contribution[i].weight*=density; } p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel, image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1), exception); q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view); for (x=0; x < (ssize_t) resize_image->columns; x++) { MagickPixelPacket pixel; MagickRealType alpha; register ssize_t i; ssize_t j; pixel=zero; if (image->matte == MagickFalse) { for (i=0; i < n; i++) { j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[i].weight; pixel.red+=alpha*GetPixelRed(p+j); pixel.green+=alpha*GetPixelGreen(p+j); pixel.blue+=alpha*GetPixelBlue(p+j); pixel.opacity+=alpha*GetPixelOpacity(p+j); } SetPixelRed(q,ClampToQuantum(pixel.red)); SetPixelGreen(q,ClampToQuantum(pixel.green)); SetPixelBlue(q,ClampToQuantum(pixel.blue)); SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if ((image->colorspace == CMYKColorspace) && (resize_image->colorspace == CMYKColorspace)) { for (i=0; i < n; i++) { j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[i].weight; pixel.index+=alpha*GetPixelIndex(indexes+j); } SetPixelIndex(resize_indexes+x,ClampToQuantum(pixel.index)); } } else { double gamma; gamma=0.0; for (i=0; i < n; i++) 
{ j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j); pixel.red+=alpha*GetPixelRed(p+j); pixel.green+=alpha*GetPixelGreen(p+j); pixel.blue+=alpha*GetPixelBlue(p+j); pixel.opacity+=contribution[i].weight*GetPixelOpacity(p+j); gamma+=alpha; } gamma=PerceptibleReciprocal(gamma); SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if ((image->colorspace == CMYKColorspace) && (resize_image->colorspace == CMYKColorspace)) { for (i=0; i < n; i++) { j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j); pixel.index+=alpha*GetPixelIndex(indexes+j); } SetPixelIndex(resize_indexes+x,ClampToQuantum(gamma*pixel.index)); } } if ((resize_image->storage_class == PseudoClass) && (image->storage_class == PseudoClass)) { i=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double) stop- 1.0)+0.5); j=(ssize_t) ((contribution[i-start].pixel-contribution[0].pixel)* image->columns+x); SetPixelIndex(resize_indexes+x,GetPixelIndex(indexes+j)); } q++; } if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_VerticalFilter) #endif proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span); if (proceed == MagickFalse) status=MagickFalse; } } resize_view=DestroyCacheView(resize_view); image_view=DestroyCacheView(image_view); contributions=DestroyContributionThreadSet(contributions); return(status); } MagickExport Image *ResizeImage(const Image *image,const size_t columns, const size_t rows,const FilterTypes filter,const double blur, ExceptionInfo *exception) { FilterTypes 
filter_type; Image *filter_image, *resize_image; MagickOffsetType offset; MagickRealType x_factor, y_factor; MagickSizeType span; MagickStatusType status; ResizeFilter *resize_filter; /* Acquire resize image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows) && (filter == UndefinedFilter) && (blur == 1.0)) return(CloneImage(image,0,0,MagickTrue,exception)); /* Acquire resize filter. */ x_factor=(MagickRealType) columns/(MagickRealType) image->columns; y_factor=(MagickRealType) rows/(MagickRealType) image->rows; filter_type=LanczosFilter; if (filter != UndefinedFilter) filter_type=filter; else if ((x_factor == 1.0) && (y_factor == 1.0)) filter_type=PointFilter; else if ((image->storage_class == PseudoClass) || (image->matte != MagickFalse) || ((x_factor*y_factor) > 1.0)) filter_type=MitchellFilter; resize_filter=AcquireResizeFilter(image,filter_type,blur,MagickFalse, exception); #if defined(MAGICKCORE_OPENCL_SUPPORT) resize_image=AccelerateResizeImage(image,columns,rows,resize_filter, exception); if (resize_image != NULL) { resize_filter=DestroyResizeFilter(resize_filter); return(resize_image); } #endif resize_image=CloneImage(image,columns,rows,MagickTrue,exception); if (resize_image == (Image *) NULL) { resize_filter=DestroyResizeFilter(resize_filter); return(resize_image); } if (x_factor > y_factor) filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception); else filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception); if (filter_image == (Image *) NULL) { resize_filter=DestroyResizeFilter(resize_filter); return(DestroyImage(resize_image)); } /* 
Resize image. */ offset=0; if (x_factor > y_factor) { span=(MagickSizeType) (filter_image->columns+rows); status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span, &offset,exception); status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor, span,&offset,exception); } else { span=(MagickSizeType) (filter_image->rows+columns); status=VerticalFilter(resize_filter,image,filter_image,y_factor,span, &offset,exception); status&=HorizontalFilter(resize_filter,filter_image,resize_image,x_factor, span,&offset,exception); } /* Free resources. */ filter_image=DestroyImage(filter_image); resize_filter=DestroyResizeFilter(resize_filter); if (status == MagickFalse) { resize_image=DestroyImage(resize_image); return((Image *) NULL); } resize_image->type=image->type; return(resize_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S a m p l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SampleImage() scales an image to the desired dimensions with pixel % sampling. Unlike other scaling methods, this method does not introduce % any additional color into the scaled image. % % The format of the SampleImage method is: % % Image *SampleImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the sampled image. % % o rows: the number of rows in the sampled image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SampleImage(const Image *image,const size_t columns, const size_t rows,ExceptionInfo *exception) { #define SampleImageTag "Sample/Image" CacheView *image_view, *sample_view; Image *sample_image; MagickBooleanType status; MagickOffsetType progress; register ssize_t x; ssize_t *x_offset, y; PointInfo sample_offset; /* Initialize sampled image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); sample_image=CloneImage(image,columns,rows,MagickTrue,exception); if (sample_image == (Image *) NULL) return((Image *) NULL); /* Check for posible user defined sampling offset Artifact The default sampling offset is in the mid-point of sample regions. */ sample_offset.x=sample_offset.y=0.5-MagickEpsilon; { const char *value; value=GetImageArtifact(image,"sample:offset"); if (value != (char *) NULL) { GeometryInfo geometry_info; MagickStatusType flags; (void) ParseGeometry(value,&geometry_info); flags=ParseGeometry(value,&geometry_info); sample_offset.x=sample_offset.y=geometry_info.rho/100.0-MagickEpsilon; if ((flags & SigmaValue) != 0) sample_offset.y=geometry_info.sigma/100.0-MagickEpsilon; } } /* Allocate scan line buffer and column offset buffers. */ x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns, sizeof(*x_offset)); if (x_offset == (ssize_t *) NULL) { sample_image=DestroyImage(sample_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (x=0; x < (ssize_t) sample_image->columns; x++) x_offset[x]=(ssize_t) ((((double) x+sample_offset.x)*image->columns)/ sample_image->columns); /* Sample each row. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); sample_view=AcquireAuthenticCacheView(sample_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,sample_image,sample_image->rows,1) #endif for (y=0; y < (ssize_t) sample_image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict sample_indexes; register PixelPacket *magick_restrict q; register ssize_t x; ssize_t y_offset; if (status == MagickFalse) continue; y_offset=(ssize_t) ((((double) y+sample_offset.y)*image->rows)/ sample_image->rows); p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1, exception); q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); sample_indexes=GetCacheViewAuthenticIndexQueue(sample_view); /* Sample each column. 
*/ for (x=0; x < (ssize_t) sample_image->columns; x++) *q++=p[x_offset[x]]; if ((image->storage_class == PseudoClass) || (image->colorspace == CMYKColorspace)) for (x=0; x < (ssize_t) sample_image->columns; x++) SetPixelIndex(sample_indexes+x,GetPixelIndex(indexes+x_offset[x])); if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SampleImage) #endif proceed=SetImageProgress(image,SampleImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); sample_view=DestroyCacheView(sample_view); x_offset=(ssize_t *) RelinquishMagickMemory(x_offset); sample_image->type=image->type; if (status == MagickFalse) sample_image=DestroyImage(sample_image); return(sample_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleImage() changes the size of an image to the given dimensions. % % The format of the ScaleImage method is: % % Image *ScaleImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the scaled image. % % o rows: the number of rows in the scaled image. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ScaleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define ScaleImageTag  "Scale/Image"

  CacheView
    *image_view,
    *scale_view;

  Image
    *scale_image;

  MagickBooleanType
    next_column,
    next_row,
    proceed,
    status;

  MagickPixelPacket
    pixel,
    *scale_scanline,  /* one output-width row, result of X-direction scaling */
    *scanline,        /* one input-width row, result of Y-direction scaling */
    *x_vector,        /* most recently read input scanline */
    *y_vector,        /* running Y-direction accumulator, one cell per column */
    zero;

  MagickRealType
    alpha;

  PointInfo
    scale,  /* fraction of the current input row/column still to consume */
    span;   /* fraction of the current output row/column still to fill */
    
  register ssize_t
    i;

  ssize_t
    number_rows,
    y;

  /*
    Initialize scaled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /* Identity geometry: a plain clone is cheaper than scaling. */
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  scale_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (scale_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(scale_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&scale_image->exception);
      scale_image=DestroyImage(scale_image);
      return((Image *) NULL);
    }
  /*
    Allocate memory.  When the row count is unchanged, scanline aliases
    x_vector (no Y-direction pass is needed); otherwise it gets its own
    buffer.  The cleanup at the end mirrors this aliasing.
  */
  x_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
    sizeof(*x_vector));
  scanline=x_vector;
  if (image->rows != scale_image->rows)
    scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t)
      image->columns,sizeof(*scanline));
  scale_scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t)
    scale_image->columns,sizeof(*scale_scanline));
  y_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
    sizeof(*y_vector));
  if ((scanline == (MagickPixelPacket *) NULL) ||
      (scale_scanline == (MagickPixelPacket *) NULL) ||
      (x_vector == (MagickPixelPacket *) NULL) ||
      (y_vector == (MagickPixelPacket *) NULL))
    {
      /* Release only what was actually acquired (scanline may alias
         x_vector, so it is freed only when it is a distinct buffer). */
      if ((image->rows != scale_image->rows) &&
          (scanline != (MagickPixelPacket *) NULL))
        scanline=(MagickPixelPacket *) RelinquishMagickMemory(scanline);
      if (scale_scanline != (MagickPixelPacket *) NULL)
        scale_scanline=(MagickPixelPacket *) RelinquishMagickMemory(
          scale_scanline);
      if (x_vector != (MagickPixelPacket *) NULL)
        x_vector=(MagickPixelPacket *) RelinquishMagickMemory(x_vector);
      if (y_vector != (MagickPixelPacket *) NULL)
        y_vector=(MagickPixelPacket *) RelinquishMagickMemory(y_vector);
      scale_image=DestroyImage(scale_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Scale image.  Single-threaded: the Y-direction accumulator state
    (y_vector, scale.y, span.y, number_rows) carries over between output
    rows, so rows cannot be processed independently.
  */
  number_rows=0;
  next_row=MagickTrue;
  span.y=1.0;
  scale.y=(double) scale_image->rows/(double) image->rows;
  (void) memset(y_vector,0,(size_t) image->columns*
    sizeof(*y_vector));
  GetMagickPixelPacket(image,&pixel);
  (void) memset(&zero,0,sizeof(zero));
  i=0;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  scale_view=AcquireAuthenticCacheView(scale_image,exception);
  for (y=0; y < (ssize_t) scale_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict scale_indexes;

    register MagickPixelPacket
      *magick_restrict s,
      *magick_restrict t;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      break;
    q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        break;
      }
    alpha=1.0;
    scale_indexes=GetCacheViewAuthenticIndexQueue(scale_view);
    if (scale_image->rows == image->rows)
      {
        /*
          Read a new scanline.  Channels are stored alpha-premultiplied in
          x_vector so that later averaging weights transparent pixels
          correctly; the transfer step divides the premultiply back out.
        */
        p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1,
          exception);
        if (p == (const PixelPacket *) NULL)
          {
            status=MagickFalse;
            break;
          }
        indexes=GetCacheViewVirtualIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (image->matte != MagickFalse)
            alpha=QuantumScale*GetPixelAlpha(p);
          x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p));
          x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p));
          x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p));
          if (image->matte != MagickFalse)
            x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p);
          if (indexes != (IndexPacket *) NULL)
            x_vector[x].index=(MagickRealType) (alpha*GetPixelIndex(indexes+x));
          p++;
        }
      }
    else
      {
        /*
          Scale Y direction.  Consume whole input rows into y_vector while
          an entire input row still fits inside the current output row
          (scale.y < span.y), then add the fractional remainder below.
        */
        while (scale.y < span.y)
        {
          if ((next_row != MagickFalse) &&
              (number_rows < (ssize_t) image->rows))
            {
              /*
                Read a new scanline.
              */
              p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1,
                exception);
              if (p == (const PixelPacket *) NULL)
                {
                  status=MagickFalse;
                  break;
                }
              indexes=GetCacheViewVirtualIndexQueue(image_view);
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                if (image->matte != MagickFalse)
                  alpha=QuantumScale*GetPixelAlpha(p);
                x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p));
                x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p));
                x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p));
                if (image->matte != MagickFalse)
                  x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p);
                if (indexes != (IndexPacket *) NULL)
                  x_vector[x].index=(MagickRealType) (alpha*
                    GetPixelIndex(indexes+x));
                p++;
              }
              number_rows++;
            }
          /* Fold the full weight of this input row into the accumulator. */
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            y_vector[x].red+=scale.y*x_vector[x].red;
            y_vector[x].green+=scale.y*x_vector[x].green;
            y_vector[x].blue+=scale.y*x_vector[x].blue;
            if (scale_image->matte != MagickFalse)
              y_vector[x].opacity+=scale.y*x_vector[x].opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              y_vector[x].index+=scale.y*x_vector[x].index;
          }
          span.y-=scale.y;
          scale.y=(double) scale_image->rows/(double) image->rows;
          next_row=MagickTrue;
        }
        if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows))
          {
            /*
              Read a new scanline.
            */
            p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1,
              exception);
            if (p == (const PixelPacket *) NULL)
              {
                status=MagickFalse;
                break;
              }
            indexes=GetCacheViewVirtualIndexQueue(image_view);
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (image->matte != MagickFalse)
                alpha=QuantumScale*GetPixelAlpha(p);
              x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p));
              x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p));
              x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p));
              if (image->matte != MagickFalse)
                x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p);
              if (indexes != (IndexPacket *) NULL)
                x_vector[x].index=(MagickRealType) (alpha*
                  GetPixelIndex(indexes+x));
              p++;
            }
            number_rows++;
            next_row=MagickFalse;
          }
        /* Emit the finished output row: accumulator plus the fractional
           span.y slice of the current input row; reset the accumulator. */
        s=scanline;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          pixel.red=y_vector[x].red+span.y*x_vector[x].red;
          pixel.green=y_vector[x].green+span.y*x_vector[x].green;
          pixel.blue=y_vector[x].blue+span.y*x_vector[x].blue;
          if (image->matte != MagickFalse)
            pixel.opacity=y_vector[x].opacity+span.y*x_vector[x].opacity;
          if (scale_indexes != (IndexPacket *) NULL)
            pixel.index=y_vector[x].index+span.y*x_vector[x].index;
          s->red=pixel.red;
          s->green=pixel.green;
          s->blue=pixel.blue;
          if (scale_image->matte != MagickFalse)
            s->opacity=pixel.opacity;
          if (scale_indexes != (IndexPacket *) NULL)
            s->index=pixel.index;
          s++;
          y_vector[x]=zero;
        }
        scale.y-=span.y;
        if (scale.y <= 0)
          {
            scale.y=(double) scale_image->rows/(double) image->rows;
            next_row=MagickTrue;
          }
        span.y=1.0;
      }
    if (scale_image->columns == image->columns)
      {
        /*
          Transfer scanline to scaled image.
        */
        s=scanline;
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (scale_image->matte != MagickFalse)
            alpha=QuantumScale*GetPixelAlpha(s);
          /* Undo the alpha premultiply; PerceptibleReciprocal guards
             against division by (near-)zero alpha. */
          alpha=PerceptibleReciprocal(alpha);
          SetPixelRed(q,ClampToQuantum(alpha*s->red));
          SetPixelGreen(q,ClampToQuantum(alpha*s->green));
          SetPixelBlue(q,ClampToQuantum(alpha*s->blue));
          if (scale_image->matte != MagickFalse)
            SetPixelOpacity(q,ClampToQuantum(s->opacity));
          if (scale_indexes != (IndexPacket *) NULL)
            SetPixelIndex(scale_indexes+x,ClampToQuantum(alpha*s->index));
          q++;
          s++;
        }
      }
    else
      {
        /*
          Scale X direction.  Same accumulator scheme as the Y pass, but
          fully contained in this row: s walks the input-width scanline,
          t walks the output-width scale_scanline.
        */
        pixel=zero;
        next_column=MagickFalse;
        span.x=1.0;
        s=scanline;
        t=scale_scanline;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          scale.x=(double) scale_image->columns/(double) image->columns;
          while (scale.x >= span.x)
          {
            if (next_column != MagickFalse)
              {
                pixel=zero;
                t++;
              }
            pixel.red+=span.x*s->red;
            pixel.green+=span.x*s->green;
            pixel.blue+=span.x*s->blue;
            if (image->matte != MagickFalse)
              pixel.opacity+=span.x*s->opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              pixel.index+=span.x*s->index;
            t->red=pixel.red;
            t->green=pixel.green;
            t->blue=pixel.blue;
            if (scale_image->matte != MagickFalse)
              t->opacity=pixel.opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              t->index=pixel.index;
            scale.x-=span.x;
            span.x=1.0;
            next_column=MagickTrue;
          }
          if (scale.x > 0)
            {
              if (next_column != MagickFalse)
                {
                  pixel=zero;
                  next_column=MagickFalse;
                  t++;
                }
              pixel.red+=scale.x*s->red;
              pixel.green+=scale.x*s->green;
              pixel.blue+=scale.x*s->blue;
              if (scale_image->matte != MagickFalse)
                pixel.opacity+=scale.x*s->opacity;
              if (scale_indexes != (IndexPacket *) NULL)
                pixel.index+=scale.x*s->index;
              span.x-=scale.x;
            }
          s++;
        }
        /* Flush any partially-filled final output column. */
        if (span.x > 0)
          {
            s--;
            pixel.red+=span.x*s->red;
            pixel.green+=span.x*s->green;
            pixel.blue+=span.x*s->blue;
            if (scale_image->matte != MagickFalse)
              pixel.opacity+=span.x*s->opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              pixel.index+=span.x*s->index;
          }
        if ((next_column == MagickFalse) &&
            ((ssize_t) (t-scale_scanline) < (ssize_t) scale_image->columns))
          {
            t->red=pixel.red;
            t->green=pixel.green;
            t->blue=pixel.blue;
            if (scale_image->matte != MagickFalse)
              t->opacity=pixel.opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              t->index=pixel.index;
          }
        /*
          Transfer scanline to scaled image.
        */
        t=scale_scanline;
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (scale_image->matte != MagickFalse)
            alpha=QuantumScale*GetPixelAlpha(t);
          alpha=PerceptibleReciprocal(alpha);
          SetPixelRed(q,ClampToQuantum(alpha*t->red));
          SetPixelGreen(q,ClampToQuantum(alpha*t->green));
          SetPixelBlue(q,ClampToQuantum(alpha*t->blue));
          if (scale_image->matte != MagickFalse)
            SetPixelOpacity(q,ClampToQuantum(t->opacity));
          if (scale_indexes != (IndexPacket *) NULL)
            SetPixelIndex(scale_indexes+x,ClampToQuantum(alpha*t->index));
          t++;
          q++;
        }
      }
    if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
    proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
  }
  scale_view=DestroyCacheView(scale_view);
  image_view=DestroyCacheView(image_view);
  /*
    Free allocated memory (scanline only if it is not an alias of x_vector).
  */
  y_vector=(MagickPixelPacket *) RelinquishMagickMemory(y_vector);
  scale_scanline=(MagickPixelPacket *) RelinquishMagickMemory(scale_scanline);
  if (scale_image->rows != image->rows)
    scanline=(MagickPixelPacket *) RelinquishMagickMemory(scanline);
  x_vector=(MagickPixelPacket *) RelinquishMagickMemory(x_vector);
  scale_image->type=image->type;
  if (status == MagickFalse)
    scale_image=DestroyImage(scale_image);
  return(scale_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T h u m b n a i l I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ThumbnailImage() changes the size of an image to the given dimensions and
%  removes any associated profiles.  The goal is to produce small low cost
%  thumbnail images suited for display on the Web.
%
%  The format of the ThumbnailImage method is:
%
%      Image *ThumbnailImage(const Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the scaled image.
%
%    o rows: the number of rows in the scaled image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor  5

  char
    *url,
    value[MaxTextExtent];

  const char
    *name;

  Image
    *thumbnail_image;

  MagickRealType
    x_factor,
    y_factor;

  struct stat
    attributes;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  x_factor=(MagickRealType) columns/(MagickRealType) image->columns;
  y_factor=(MagickRealType) rows/(MagickRealType) image->rows;
  /*
    Choose a resize strategy: a direct resize for modest reductions or tiny
    targets, otherwise a fast SampleImage() pre-shrink followed by a filtered
    resize for large reductions.
  */
  if ((x_factor*y_factor) > 0.1)
    thumbnail_image=ResizeImage(image,columns,rows,image->filter,image->blur,
      exception);
  else
    if (((SampleFactor*columns) < 128) || ((SampleFactor*rows) < 128))
      thumbnail_image=ResizeImage(image,columns,rows,image->filter,
        image->blur,exception);
    else
      {
        Image
          *sample_image;

        sample_image=SampleImage(image,SampleFactor*columns,SampleFactor*rows,
          exception);
        if (sample_image == (Image *) NULL)
          return((Image *) NULL);
        thumbnail_image=ResizeImage(sample_image,columns,rows,image->filter,
          image->blur,exception);
        sample_image=DestroyImage(sample_image);
      }
  if (thumbnail_image == (Image *) NULL)
    return(thumbnail_image);
  (void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
  if (thumbnail_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  /*
    Strip all profiles except color profiles.  The iterator is reset after
    each deletion because DeleteImageProfile() invalidates it.
  */
  ResetImageProfileIterator(thumbnail_image);
  for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
  {
    if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
      {
        (void) DeleteImageProfile(thumbnail_image,name);
        ResetImageProfileIterator(thumbnail_image);
      }
    name=GetNextImageProfile(thumbnail_image);
  }
  (void) DeleteImageProperty(thumbnail_image,"comment");
  /*
    Attach freedesktop.org Thumbnail Managing Standard metadata
    (Thumb::URI, Thumb::MTime, Thumb::Size, Thumb::Mimetype, ...).
  */
  (void) CopyMagickString(value,image->magick_filename,MaxTextExtent);
  if (strstr(image->magick_filename,"//") == (char *) NULL)
    (void) FormatLocaleString(value,MaxTextExtent,"file://%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value);
  (void) CopyMagickString(value,image->magick_filename,MaxTextExtent);
  if (GetPathAttributes(image->filename,&attributes) != MagickFalse)
    {
      (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
        attributes.st_mtime);
      (void) SetImageProperty(thumbnail_image,"Thumb::MTime",value);
    }
  /*
    BUG FIX: removed a stray duplicate FormatLocaleString() that read
    attributes.st_mtime unconditionally here.  When GetPathAttributes()
    failed, 'attributes' was uninitialized (undefined behavior), and the
    formatted value was immediately overwritten by FormatMagickSize()
    below anyway (dead code).
  */
  (void) FormatMagickSize(GetBlobSize(image),MagickFalse,value);
  (void) ConcatenateMagickString(value,"B",MaxTextExtent);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value);
  (void) FormatLocaleString(value,MaxTextExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value);
  url=GetMagickHomeURL();
  (void) SetImageProperty(thumbnail_image,"software",url);
  url=DestroyString(url);
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    image->magick_columns);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Width",value);
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    image->magick_rows);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Height",value);
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    GetImageListLength(image));
  (void) SetImageProperty(thumbnail_image,"Thumb::Document::Pages",value);
  return(thumbnail_image);
}
bugged3.c
/****************************************************************************** * ЗАДАНИЕ: bugged3.c * ОПИСАНИЕ: * Ошибка времени выполнения. ******************************************************************************/ #include <omp.h> #include <stdio.h> #include <stdlib.h> #define N 50 int main (int argc, char **argv) { int i, nthreads, tid, section; float a[N], b[N], c[N]; void print_results(float array[N], int tid, int section); for (i = 0; i < N; i++) a[i] = b[i] = i * 1.0; #pragma omp parallel private(c, i, tid, section) { tid = omp_get_thread_num(); if (tid == 0) { nthreads = omp_get_num_threads(); printf("Number of threads = %d\n", nthreads); } #pragma omp barrier printf("Thread %d starting...\n", tid); #pragma omp barrier #pragma omp sections nowait { #pragma omp section { section = 1; for (i = 0; i < N; i++) c[i] = a[i] * b[i]; print_results(c, tid, section); } #pragma omp section { section = 2; for (i = 0; i < N; i++) c[i] = a[i] + b[i]; print_results(c, tid, section); } } #pragma omp barrier printf("Thread %d exiting...\n",tid); } } void print_results(float array[N], int tid, int section) { int i, j; j = 1; #pragma omp critical { printf("\nThread %d did section %d. The results are:\n", tid, section); for (i = 0; i < N; i++) { printf("%e ", array[i]); j++; if (j == 6) { printf("\n"); j = 1; } } printf("\n"); } // Лишний барьер, который пропускает потоки на барьере строки 54. // В итоге потоки не выполнившие секцию завершают работу без синхронизации с выполнившими. //#pragma omp barrier printf("Thread %d done and synchronized.\n", tid); }
hessian.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 *
 * Author: Sebastian Wouters <sebastianwouters@gmail.com>
 *
 * Date: August 3, 2015
 *
 * Augmented Hessian Newton-Raphson optimization of
 *    1. either the Edmiston-Ruedenberg localization cost function
 *    2. or the Boys localization cost function
 * in both cases with an analytic gradient and hessian
 *
 * Reference: C. Edmiston and K. Ruedenberg, Reviews of Modern Physics 35, 457-464 (1963). http://dx.doi.org/10.1103/RevModPhys.35.457
 *            http://sebwouters.github.io/CheMPS2/doxygen/classCheMPS2_1_1EdmistonRuedenberg.html
 */

#include <stdlib.h>

/*
 * Apply the analytic hessian of the Boys localization cost function to a
 * trial vector: vector_out = H * vector_in.
 *
 * Parameters:
 *   Norbs      - number of orbitals; the rotation space has
 *                num_vars = Norbs*(Norbs-1)/2 independent (p<q) pairs.
 *   x_symm, y_symm, z_symm - dipole integral matrices in the orbital basis,
 *                addressed as m[p + Norbs*q]; assumed symmetric (TODO
 *                confirm against caller).
 *   vector_in  - input vector of length num_vars.
 *   vector_out - output vector of length num_vars (overwritten).
 *
 * BUG FIX: p, q, r and s were previously declared at function scope before
 * the "#pragma omp parallel for", making them SHARED across threads - a
 * data race, since every iteration writes them (s and r are even reused as
 * inner loop counters).  They are now declared inside the loop body so each
 * iteration/thread owns its copies.
 */
void hessian_boys(const int Norbs, double * x_symm, double * y_symm, double * z_symm, double * vector_in, double * vector_out){

    const int num_vars = (Norbs*(Norbs-1))/2;

    // for (p=0; p<Norbs; p++){
    //    for (q=p+1; q<Norbs; q++){
    //        const int rowindex == q + p * Norbs - ((p+1)*(p+2))/2
    #pragma omp parallel for schedule(static)
    for (int rowindex=0; rowindex<num_vars; rowindex++){

        /* Recover the (p,q) pair from the flat row index; r and s double as
           scratch / inner loop counters and must stay thread-private. */
        int p, q, r, s;
        s = num_vars - 1 - rowindex;
        r = 2;
        while ( (r*(r-1))/2 <= s ){ r++; }
        p = Norbs - r;
        q = rowindex - p*Norbs + ((p+1)*(p+2))/2;

        double value = 0.0;

        // Part 1: p == r
        for (s=p+1; s<Norbs; s++){
            const int arg_pq = p + Norbs * q;
            const int arg_ps = p + Norbs * s;
            const int arg_qs = q + Norbs * s;
            const int arg_pp = p + Norbs * p;
            const int arg_qq = q + Norbs * q;
            const int arg_ss = s + Norbs * s;
            const double prefactor = ( 2 * x_symm[arg_pq] * x_symm[arg_ps] + x_symm[arg_qs] * ( x_symm[arg_pp] - 0.5 * x_symm[arg_qq] - 0.5 * x_symm[arg_ss] ) )
                                   + ( 2 * y_symm[arg_pq] * y_symm[arg_ps] + y_symm[arg_qs] * ( y_symm[arg_pp] - 0.5 * y_symm[arg_qq] - 0.5 * y_symm[arg_ss] ) )
                                   + ( 2 * z_symm[arg_pq] * z_symm[arg_ps] + z_symm[arg_qs] * ( z_symm[arg_pp] - 0.5 * z_symm[arg_qq] - 0.5 * z_symm[arg_ss] ) );
            const int colindex = rowindex + s - q; // s + p * Norbs - ((p+1)*(p+2))/2
            value += prefactor * vector_in[ colindex ];
        }

        // Part 2: q == s
        for (r=0; r<q; r++){
            const int arg_pq = p + Norbs * q;
            const int arg_rq = r + Norbs * q;
            const int arg_pr = p + Norbs * r;
            const int arg_qq = q + Norbs * q;
            const int arg_pp = p + Norbs * p;
            const int arg_rr = r + Norbs * r;
            const double prefactor = ( 2 * x_symm[arg_pq] * x_symm[arg_rq] + x_symm[arg_pr] * ( x_symm[arg_qq] - 0.5 * x_symm[arg_pp] - 0.5 * x_symm[arg_rr] ) )
                                   + ( 2 * y_symm[arg_pq] * y_symm[arg_rq] + y_symm[arg_pr] * ( y_symm[arg_qq] - 0.5 * y_symm[arg_pp] - 0.5 * y_symm[arg_rr] ) )
                                   + ( 2 * z_symm[arg_pq] * z_symm[arg_rq] + z_symm[arg_pr] * ( z_symm[arg_qq] - 0.5 * z_symm[arg_pp] - 0.5 * z_symm[arg_rr] ) );
            const int colindex = q + r * Norbs - ((r+1)*(r+2))/2;
            value += prefactor * vector_in[ colindex ];
        }

        // Part 3: q == r
        for (s=q+1; s<Norbs; s++){
            const int arg_pq = p + Norbs * q;
            const int arg_qs = q + Norbs * s;
            const int arg_ps = p + Norbs * s;
            const int arg_qq = q + Norbs * q;
            const int arg_pp = p + Norbs * p;
            const int arg_ss = s + Norbs * s;
            const double prefactor = ( 2 * x_symm[arg_pq] * x_symm[arg_qs] + x_symm[arg_ps] * ( x_symm[arg_qq] - 0.5 * x_symm[arg_pp] - 0.5 * x_symm[arg_ss] ) )
                                   + ( 2 * y_symm[arg_pq] * y_symm[arg_qs] + y_symm[arg_ps] * ( y_symm[arg_qq] - 0.5 * y_symm[arg_pp] - 0.5 * y_symm[arg_ss] ) )
                                   + ( 2 * z_symm[arg_pq] * z_symm[arg_qs] + z_symm[arg_ps] * ( z_symm[arg_qq] - 0.5 * z_symm[arg_pp] - 0.5 * z_symm[arg_ss] ) );
            const int colindex = s + q * Norbs - ((q+1)*(q+2))/2;
            value -= prefactor * vector_in[ colindex ];
        }

        // Part 4: p == s
        for (r=0; r<p; r++){
            const int arg_pq = p + Norbs * q;
            const int arg_rp = r + Norbs * p;
            const int arg_qr = q + Norbs * r;
            const int arg_pp = p + Norbs * p;
            const int arg_qq = q + Norbs * q;
            const int arg_rr = r + Norbs * r;
            const double prefactor = ( 2 * x_symm[arg_pq] * x_symm[arg_rp] + x_symm[arg_qr] * ( x_symm[arg_pp] - 0.5 * x_symm[arg_qq] - 0.5 * x_symm[arg_rr] ) )
                                   + ( 2 * y_symm[arg_pq] * y_symm[arg_rp] + y_symm[arg_qr] * ( y_symm[arg_pp] - 0.5 * y_symm[arg_qq] - 0.5 * y_symm[arg_rr] ) )
                                   + ( 2 * z_symm[arg_pq] * z_symm[arg_rp] + z_symm[arg_qr] * ( z_symm[arg_pp] - 0.5 * z_symm[arg_qq] - 0.5 * z_symm[arg_rr] ) );
            const int colindex = p + r * Norbs - ((r+1)*(r+2))/2;
            value -= prefactor * vector_in[ colindex ];
        }

        vector_out[ rowindex ] = -Norbs*value;
    }
}

/*
 * Apply the analytic hessian of the Edmiston-Ruedenberg localization cost
 * function to a trial vector: vector_out = H * vector_in.
 *
 * Parameters:
 *   Norbs      - number of orbitals (num_vars = Norbs*(Norbs-1)/2 rotations).
 *   eri        - two-electron repulsion integrals, addressed as
 *                eri[i + Norbs*(j + Norbs*(k + Norbs*l))].
 *   vector_in  - input vector of length num_vars.
 *   vector_out - output vector of length num_vars (overwritten).
 *
 * Same data-race fix as hessian_boys: p, q, r, s are now loop-local.
 */
void hessian_edmiston(const int Norbs, double * eri, double * vector_in, double * vector_out){

    const int num_vars = (Norbs*(Norbs-1))/2;

    // for (p=0; p<Norbs; p++){
    //    for (q=p+1; q<Norbs; q++){
    //        const int rowindex == q + p * Norbs - ((p+1)*(p+2))/2
    #pragma omp parallel for schedule(static)
    for (int rowindex=0; rowindex<num_vars; rowindex++){

        /* Thread-private pair recovery and scratch counters (see above). */
        int p, q, r, s;
        s = num_vars - 1 - rowindex;
        r = 2;
        while ( (r*(r-1))/2 <= s ){ r++; }
        p = Norbs - r;
        q = rowindex - p*Norbs + ((p+1)*(p+2))/2;

        double value = 0.0;

        // Part 1: p == r
        for (s=p+1; s<Norbs; s++){
            const int pqps = p + Norbs * ( q + Norbs * ( p + Norbs * s ));
            const int ppqs = p + Norbs * ( p + Norbs * ( q + Norbs * s ));
            const int qqqs = q + Norbs * ( q + Norbs * ( q + Norbs * s ));
            const int sssq = s + Norbs * ( s + Norbs * ( s + Norbs * q ));
            const double prefactor = 2*(4*eri[pqps] + 2*eri[ppqs] - eri[qqqs] - eri[sssq]);
            const int colindex = rowindex + s - q; // s + p * Norbs - ((p+1)*(p+2))/2
            value += prefactor * vector_in[ colindex ];
        }

        // Part 2: q == s
        for (r=0; r<q; r++){
            const int qpqr = q + Norbs * ( p + Norbs * ( q + Norbs * r ));
            const int qqpr = q + Norbs * ( q + Norbs * ( p + Norbs * r ));
            const int pppr = p + Norbs * ( p + Norbs * ( p + Norbs * r ));
            const int rrrp = r + Norbs * ( r + Norbs * ( r + Norbs * p ));
            const double prefactor = 2*(4*eri[qpqr] + 2*eri[qqpr] - eri[pppr] - eri[rrrp]);
            const int colindex = q + r * Norbs - ((r+1)*(r+2))/2;
            value += prefactor * vector_in[ colindex ];
        }

        // Part 3: q == r
        for (s=q+1; s<Norbs; s++){
            const int qpqs = q + Norbs * ( p + Norbs * ( q + Norbs * s ));
            const int qqps = q + Norbs * ( q + Norbs * ( p + Norbs * s ));
            const int ppps = p + Norbs * ( p + Norbs * ( p + Norbs * s ));
            const int sssp = s + Norbs * ( s + Norbs * ( s + Norbs * p ));
            const double prefactor = 2*(4*eri[qpqs] + 2*eri[qqps] - eri[ppps] - eri[sssp]);
            const int colindex = s + q * Norbs - ((q+1)*(q+2))/2;
            value -= prefactor * vector_in[ colindex ];
        }

        // Part 4: p == s
        for (r=0; r<p; r++){
            const int pqpr = p + Norbs * ( q + Norbs * ( p + Norbs * r ));
            const int ppqr = p + Norbs * ( p + Norbs * ( q + Norbs * r ));
            const int qqqr = q + Norbs * ( q + Norbs * ( q + Norbs * r ));
            const int rrrq = r + Norbs * ( r + Norbs * ( r + Norbs * q ));
            const double prefactor = 2*(4*eri[pqpr] + 2*eri[ppqr] - eri[qqqr] - eri[rrrq]);
            const int colindex = p + r * Norbs - ((r+1)*(r+2))/2;
            value -= prefactor * vector_in[ colindex ];
        }

        vector_out[ rowindex ] = -value;
    }
}
gemm_symm_int8.h
// chgemm is pleased to support the open source community by supporting ncnn available. // // author:tpoisonooo (https://github.com/tpoisonooo/chgemm) implement symmetric int8 GEMM on aarch64. // // Copyright (C) 2019 tpoisonooo. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #pragma once #if __aarch64__ #define DECOMPOSE_K\ int ktmp = k;\ int k8 = k >> 3;\ int k8_even = (k8 % 2 == 0) ? 0: 1;\ k -= (k8 << 3);\ int k4 = k >> 2;\ k -= (k4 << 2);\ int k2 = k >> 1;\ k -= (k2 << 1);\ int k1 = k;\ k = ktmp; #define DECOMPOSE_N\ int ntmp = n;\ int n4 = n >> 2;\ n -= (n4 << 2);\ int n2 = n >> 1;\ n -= (n2 << 1);\ int n1 = n;\ n = ntmp; #define PRINT_MATRIX 0 #if PRINT_MATRIX static void print_int8_matrix(char* name, const int8_t *a, int m, int k, int ldx) { fprintf(stdout, "------------- %s \n", name); for (int i = 0; i < m; ++i) { for (int j = 0; j < k; ++j) { fprintf(stdout, "%d \t", a[i * ldx + j]); } fprintf(stdout, "\n\n"); } } static void print_int32_matrix(char* name, const int32_t *a, int m, int k, int ldx) { fprintf(stdout, "------------- %s \n", name); for (int i = 0; i < m; ++i) { for (int j = 0; j < k; ++j) { fprintf(stdout, "%d \t", a[i * ldx + j]); } fprintf(stdout, "\n\n"); } } static void print_fp32_vec(char* name, const float *a, int len) { fprintf(stdout, "------------- %s \n", name); for (int i = 0; i < len; ++i) { fprintf(stdout, "%f \t", a[i]); } fprintf(stdout, "\n\n"); } #endif static void reorder_b(const int8_t* b, int8_t* sb, const int k, 
                      const int n, const int ldx) {
#if PRINT_MATRIX
    print_int8_matrix("b", b, k, n, ldx);
    int8_t *origin = sb;
#endif
    int i = 0;
    // Width-4 panels: for each group of 4 columns, emit depth blocks of
    // 8/4/2/1.  Within a depth-8 block the layout is column-major:
    // 8 consecutive depth values of column i, then of i+1, i+2, i+3.
    for (; i+3 < n; i += 4) {
        // p0..p7 walk 8 consecutive rows (depth values) of columns i..i+3.
        const int8_t *p0 = b + i;
        const int8_t *p1 = b + 1 * ldx + i;
        const int8_t *p2 = b + 2 * ldx + i;
        const int8_t *p3 = b + 3 * ldx + i;
        const int8_t *p4 = b + 4 * ldx + i;
        const int8_t *p5 = b + 5 * ldx + i;
        const int8_t *p6 = b + 6 * ldx + i;
        const int8_t *p7 = b + 7 * ldx + i;

        int j = 0;
        // Depth blocks of 8: 4 columns x 8 depth values = 32 bytes per block.
        for (; j+7 < k; j += 8) {
            sb[0] = p0[0]; sb[1] = p1[0]; sb[2] = p2[0]; sb[3] = p3[0];
            sb[4] = p4[0]; sb[5] = p5[0]; sb[6] = p6[0]; sb[7] = p7[0];
            sb[8] = p0[1]; sb[9] = p1[1]; sb[10] = p2[1]; sb[11] = p3[1];
            sb[12] = p4[1]; sb[13] = p5[1]; sb[14] = p6[1]; sb[15] = p7[1];
            sb[16] = p0[2]; sb[17] = p1[2]; sb[18] = p2[2]; sb[19] = p3[2];
            sb[20] = p4[2]; sb[21] = p5[2]; sb[22] = p6[2]; sb[23] = p7[2];
            sb[24] = p0[3]; sb[25] = p1[3]; sb[26] = p2[3]; sb[27] = p3[3];
            sb[28] = p4[3]; sb[29] = p5[3]; sb[30] = p6[3]; sb[31] = p7[3];
            sb += 32;
            p0 += 8 * ldx; p1 += 8 * ldx; p2 += 8 * ldx; p3 += 8 * ldx;
            p4 += 8 * ldx; p5 += 8 * ldx; p6 += 8 * ldx; p7 += 8 * ldx;
        }
        // Depth remainder >= 4: one 4-deep block (4 columns x 4 depth values).
        if (j+3 < k) {
            j += 4;
            sb[0] = p0[0]; sb[1] = p1[0]; sb[2] = p2[0]; sb[3] = p3[0];
            sb[4] = p0[1]; sb[5] = p1[1]; sb[6] = p2[1]; sb[7] = p3[1];
            sb[8] = p0[2]; sb[9] = p1[2]; sb[10] = p2[2]; sb[11] = p3[2];
            sb[12] = p0[3]; sb[13] = p1[3]; sb[14] = p2[3]; sb[15] = p3[3];
            sb += 16;
            p0 += 4 * ldx; p1 += 4 * ldx; p2 += 4 * ldx; p3 += 4 * ldx;
        }
        // Depth remainder >= 2: one 2-deep block (2 depth values per column).
        if (j+1 < k) {
            j += 2;
            sb[0] = p0[0]; sb[1] = p1[0];
            sb[2] = p0[1]; sb[3] = p1[1];
            sb[4] = p0[2]; sb[5] = p1[2];
            sb[6] = p0[3]; sb[7] = p1[3];
            sb += 8;
            p0 += 2 * ldx; p1 += 2 * ldx;
        }
        // Last single depth value: one row of 4 columns.
        if (j < k) {
            sb[0] = p0[0]; sb[1] = p0[1]; sb[2] = p0[2]; sb[3] = p0[3];
            sb += 4;
            p0 += ldx;
        }
    }
    // Width-2 panel for a 2-column remainder; same depth blocking as above
    // but only 2 columns wide (p2..p7 are used by the depth-8 block only).
    if (i+1 < n) {
        const int8_t *p0 = b + i;
        const int8_t *p1 = b + 1 * ldx + i;
        const int8_t *p2 = b + 2 * ldx + i;
        const int8_t *p3 = b + 3 * ldx + i;
        const int8_t *p4 = b + 4 * ldx + i;
        const int8_t *p5 = b + 5 * ldx + i;
        const int8_t *p6 = b + 6 * ldx + i;
        const int8_t *p7 = b + 7 * ldx + i;

        int j = 0;
        // Depth blocks of 8: 2 columns x 8 depth values = 16 bytes.
        for (; j+7 < k; j += 8) {
            sb[0] = p0[0]; sb[1] = p1[0]; sb[2] = p2[0]; sb[3] = p3[0];
            sb[4] = p4[0]; sb[5] = p5[0]; sb[6] = p6[0]; sb[7] = p7[0];
            sb[8] = p0[1]; sb[9] = p1[1]; sb[10] = p2[1]; sb[11] = p3[1];
            sb[12] = p4[1]; sb[13] = p5[1]; sb[14] = p6[1]; sb[15] = p7[1];
            sb += 16;
            p0 += 8 * ldx; p1 += 8 * ldx; p2 += 8 * ldx; p3 += 8 * ldx;
            p4 += 8 * ldx; p5 += 8 * ldx; p6 += 8 * ldx; p7 += 8 * ldx;
        }
        // Depth remainder >= 4.
        if (j+3 < k) {
            j += 4;
            sb[0] = p0[0]; sb[1] = p1[0]; sb[2] = p2[0]; sb[3] = p3[0];
            sb[4] = p0[1]; sb[5] = p1[1]; sb[6] = p2[1]; sb[7] = p3[1];
            sb += 8;
            p0 += 4 * ldx; p1 += 4 * ldx; p2 += 4 * ldx; p3 += 4 * ldx;
        }
        // Depth remainder >= 2.
        if (j+1 < k) {
            j += 2;
            sb[0] = p0[0]; sb[1] = p1[0];
            sb[2] = p0[1]; sb[3] = p1[1];
            sb += 4;
            p0 += 2 * ldx; p1 += 2 * ldx;
        }
        // Last single depth value.
        if (j < k) {
            sb[0] = p0[0]; sb[1] = p0[1];
            sb += 2;
            p0 += ldx;
        }
        i += 2;
    }
    // Width-1 panel for the final odd column; the panel degenerates to the
    // column's depth values in order.
    if (i < n) {
        const int8_t *p0 = b + i;
        const int8_t *p1 = b + 1 * ldx + i;
        const int8_t *p2 = b + 2 * ldx + i;
        const int8_t *p3 = b + 3 * ldx + i;
        const int8_t *p4 = b + 4 * ldx + i;
        const int8_t *p5 = b + 5 * ldx + i;
        const int8_t *p6 = b + 6 * ldx + i;
        const int8_t *p7 = b + 7 * ldx + i;

        int j = 0;
        for (; j+7 < k; j += 8) {
            sb[0] = p0[0]; sb[1] = p1[0]; sb[2] = p2[0]; sb[3] = p3[0];
            sb[4] = p4[0]; sb[5] = p5[0]; sb[6] = p6[0]; sb[7] = p7[0];
            sb += 8;
            p0 += 8 * ldx; p1 += 8 * ldx; p2 += 8 * ldx; p3 += 8 * ldx;
            p4 += 8 * ldx; p5 += 8 * ldx; p6 += 8 * ldx; p7 += 8 * ldx;
        }
        if (j+3 < k) {
            j += 4;
            sb[0] = p0[0]; sb[1] = p1[0]; sb[2] = p2[0]; sb[3] = p3[0];
            sb += 4;
            p0 += 4 * ldx; p1 += 4 * ldx; p2 += 4 * ldx; p3 += 4 * ldx;
        }
        if (j+1 < k) {
            j += 2;
            sb[0] = p0[0]; sb[1] = p1[0];
            sb += 2;
            p0 += 2 * ldx; p1 += 2 * ldx;
        }
        if (j < k) {
            sb[0] = p0[0];
            sb += 1;
            p0 += ldx;
        }
    }
#if PRINT_MATRIX
    print_int8_matrix("sb", origin, k, n, n);
#endif
}

// Pack matrix A (m x k, row stride ldx) into sa as row panels of height
// 4/2/1, with the depth dimension blocked 8/4/2/1 inside each panel
// (mirrors the B packing above so the kernels stream both buffers linearly).
static void reorder_a(int8_t* a, int8_t* sa, int m, const int k, const int ldx)
{
#if PRINT_MATRIX
    print_int8_matrix("a", a, m, k, ldx);
    int8_t *origin = sa;
#endif
    int i = 0;
    // Height-4 panels: interleave 4 rows, depth-blocked 8/4/2/1.
    for (; i + 3 < m; i += 4) {
int8_t *p0 = a; int8_t *p1 = a + ldx; int8_t *p2 = a + 2 * ldx; int8_t *p3 = a + 3 * ldx; int j = 0; for (; j + 7 < k; j += 8) { asm volatile ( "ld1 {v0.8b}, [%0], #8 \n" "ld1 {v1.8b}, [%1], #8 \n" "ld1 {v2.8b}, [%2], #8 \n" "ld1 {v3.8b}, [%3], #8 \n" "st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [%4], #32\n" : "=r"(p0), "=r"(p1), "=r"(p2), "=r"(p3), "=r"(sa) : "0"(p0), "1"(p1), "2"(p2), "3"(p3), "4"(sa) : "cc", "memory", "v0", "v1", "v2", "v3" ); } if (j + 3 < k) { j += 4; asm volatile ( "ld1 {v0.8b}, [%0] \n" "add %0, %0, #4 \n" "ld1 {v1.8b}, [%1] \n" "add %1, %1, #4 \n" "ld1 {v2.8b}, [%2] \n" "add %2, %2, #4 \n" "ld1 {v3.8b}, [%3] \n" "add %3, %3, #4 \n" "trn1 v0.2s, v0.2s, v1.2s \n" "st1 {v0.8b}, [%4], #8 \n" "trn1 v2.2s, v2.2s, v3.2s \n" "st1 {v2.8b}, [%4], #8 \n" : "=r"(p0), "=r"(p1), "=r"(p2), "=r"(p3), "=r"(sa) : "0"(p0), "1"(p1), "2"(p2), "3"(p3), "4"(sa) : "cc", "memory", "v0", "v1", "v2", "v3" ); } if (j + 1 < k) { j += 2; asm volatile ( "ld1 {v0.8b}, [%0] \n" "add %0, %0, #2 \n" "ld1 {v1.8b}, [%1] \n" "add %1, %1, #2 \n" "ld1 {v2.8b}, [%2] \n" "add %2, %2, #2 \n" "ld1 {v3.8b}, [%3] \n" "add %3, %3, #2 \n" "trn1 v0.4h, v0.4h, v1.4h \n" "trn1 v2.4h, v2.4h, v3.4h \n" "trn1 v0.2s, v0.2s, v2.2s \n" "st1 {v0.8b}, [%4], #8 \n" : "=r"(p0), "=r"(p1), "=r"(p2), "=r"(p3), "=r"(sa) : "0"(p0), "1"(p1), "2"(p2), "3"(p3), "4"(sa) : "cc", "memory", "v0", "v1", "v2", "v3" ); } if (j < k) { *sa++ = *p0; *sa++ = *p1; *sa++ = *p2; *sa++ = *p3; } a += 4 * ldx; } if (i + 1 < m) { i += 2; int8_t *p0 = a; int8_t *p1 = a + ldx; int j = 0; for (; j + 7 < k; j += 8) { asm volatile ( "ld1 {v0.8b}, [%0], #8 \n" "ld1 {v1.8b}, [%1], #8 \n" "st1 {v0.8b, v1.8b}, [%2], #16\n" : "=r"(p0), "=r"(p1), "=r"(sa) : "0"(p0), "1"(p1), "2"(sa) : "cc", "memory", "v0", "v1" ); } if (j + 3 < k) { j += 4; asm volatile ( "ld1 {v0.8b}, [%0] \n" "add %0, %0, #4 \n" "ld1 {v1.8b}, [%1] \n" "add %1, %1, #4 \n" "trn1 v0.2s, v0.2s, v1.2s \n" "st1 {v0.8b}, [%2], #8 \n" : "=r"(p0), "=r"(p1), "=r"(sa) : "0"(p0), "1"(p1), 
"2"(sa) : "cc", "memory", "v0", "v1" ); } if (j + 1 < k) { j += 2; sa[0] = p0[0]; sa[1] = p0[1]; sa[2] = p1[0]; sa[3] = p1[1]; sa += 4; p0 += 2; p1 += 2; } if (j < k) { sa[0] = p0[0]; sa[1] = p1[0]; sa += 2; } a += 2 * ldx; } if (i < m) { memcpy(sa, a, sizeof(int8_t) * ldx); } #if PRINT_MATRIX print_int8_matrix("sa", origin, m, k, k); #endif } void int8kernel_m1(void* dst, int8_t* sa, int8_t* sb, int, int k, int n, int, float* scales, float* bias) { void *pc = dst; int8_t *pa = sa; int8_t *pb = sb; DECOMPOSE_K DECOMPOSE_N if (n4 > 0) { asm volatile( "9: \n" " eor v8.16b, v8.16b, v8.16b \n" " eor v9.16b, v9.16b, v9.16b \n" " eor v10.16b, v10.16b, v10.16b\n" " eor v11.16b, v11.16b, v11.16b\n" " mov x8, %0 // PanelA\n" " cmp %w4, #0 \n" " beq 1f \n" " mov w19, %w4 \n" " cmp %w3, #0 \n" " beq 2f// loop number is even \n" " // start loopm1_kd8_nd4\n" " subs w19, w19, #1 \n" " ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 // load four lines of B\n" " ld1 {v2.8b}, [%0], #8 // load two lines of PanelA\n" " smull v0.8h, v4.8b, v2.8b \n" " saddlp v8.4s, v0.8h \n" " smull v0.8h, v5.8b, v2.8b \n" " saddlp v9.4s, v0.8h \n" " smull v0.8h, v6.8b, v2.8b \n" " saddlp v10.4s, v0.8h \n" " smull v0.8h, v7.8b, v2.8b \n" " saddlp v11.4s, v0.8h \n" " cmp w19, #0 \n" " beq 3f \n" " 2: \n" " ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 \n" " ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [%1], #32\n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " smull v0.8h, v2.8b, v4.8b \n" " smlal v0.8h, v3.8b, v12.8b \n" " sadalp v8.4s, v0.8h \n" " smull v1.8h, v2.8b, v5.8b \n" " smlal v1.8h, v3.8b, v13.8b \n" " sadalp v9.4s, v1.8h \n" " smull v0.8h, v2.8b, v6.8b \n" " smlal v0.8h, v3.8b, v14.8b \n" " sadalp v10.4s, v0.8h \n" " smull v1.8h, v2.8b, v7.8b \n" " smlal v1.8h, v3.8b, v15.8b \n" " sadalp v11.4s, v1.8h \n" " subs w19, w19, #2 \n" " bne 2b \n" " 3: \n" " addp v8.4s, v8.4s, v9.4s \n" " addp v10.4s, v10.4s, v11.4s\n" " addp v8.4s, v8.4s, v10.4s \n" " // start process kd4 kd2 kd1 cases\n" " 1: \n" " cmp %w5, #0 \n" " 
beq 4f \n" " // start subkernel_m1n4k4 \n" " ld1 {v4.8b, v5.8b}, [%1], #16 // load B4x4\n" " sxtl v4.8h, v4.8b \n" " sxtl v5.8h, v5.8b \n" " mov v6.d[0], v4.d[1] \n" " mov v7.d[0], v5.d[1] \n" " ld1 {v2.8b}, [%0] // load A1x4\n" " add %0, %0, #4 \n" " sxtl v2.8h, v2.8b \n" " smull v12.4s, v2.4h, v4.4h \n" " smull v13.4s, v2.4h, v6.4h \n" " smull v14.4s, v2.4h, v5.4h \n" " smull v15.4s, v2.4h, v7.4h \n" " addp v12.4s, v12.4s, v13.4s\n" " addp v14.4s, v14.4s, v15.4s\n" " addp v12.4s, v12.4s, v14.4s\n" " add v8.4s, v8.4s, v12.4s \n" " 4: \n" " cmp %w6, #0 \n" " beq 5f \n" " // start subkernel_m1n4k2\n" " ld1 {v4.8b}, [%0] // load A1x2 \n" " add %0, %0, #2 \n" " ld1 {v0.8b}, [%1], #8 // load B2x4 \n" " mov v4.h[1], v4.h[0] \n" " mov v4.s[1], v4.s[0] \n" " smull v0.8h, v0.8b, v4.8b \n" " sadalp v8.4s, v0.8h \n" " 5: \n" " cmp %w7, #0 \n" " beq 6f \n" " // start subkernel_m1n4k1 \n" " ld1 {v4.8b}, [%1] // load B1x4\n" " add %1, %1, #4 \n" " ld1 {v2.8b}, [%0] // load A1x1\n" " add %0, %0, #1 \n" " sxtl v4.8h, v4.8b \n" " sxtl v2.8h, v2.8b \n" " smlal v8.4s, v4.4h, v2.h[0]\n" " 6: \n" " cmp %9, #0 \n" " beq 7f \n" " ldr w24, [%9] \n" " // int32 => fp32 \n" " scvtf v8.4s, v8.4s \n" " // fp32 *= scale_tm \n" " mov v12.s[0], w24 \n" " fmul v8.4s, v8.4s, v12.s[0]\n" " cmp %10, #0 \n" " beq 8f \n" " // fp32 += bias_tm \n" " ldr w24, [%10] \n" " dup v15.4s, w24 \n" " fadd v8.4s, v8.4s, v15.4s \n" " 8: \n" " // fp32 -> int32 \n" " fcvtas v8.4s, v8.4s\n" " // int32 -> int16 \n" " sqxtn v8.4h, v8.4s \n" " // int16 -> int8 \n" " sqxtn v8.8b, v8.8h \n" " // save \n" " st1 {v8.s}[0], [%2]\n" " add %2, %2, #4 \n" " b m1_loopnd4_finish\n" " 7: \n" " st1 {v8.4s}, [%2], #16 \n" " m1_loopnd4_finish: \n" " subs %w8, %w8, #1 \n" " mov %0, x8 \n" " bne 9b \n" : "=r"(pa), // %0 "=r"(pb), // %1 "=r"(pc), // %2 "=r"(k8_even),// %3 "=r"(k8), // %4 "=r"(k4), // %5 "=r"(k2), // %6 "=r"(k1), // %7 "=r"(n4), // %8 "=r"(scales), // %9 "=r"(bias) // %10 : "0"(pa), "1"(pb), "2"(pc), "3"(k8_even), 
"4"(k8), "5"(k4), "6"(k2), "7"(k1), "8"(n4), "9"(scales), "10"(bias) : "cc", "memory", "x8", "w19", "w24", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } if (n2 > 0) { asm volatile( "m1_nd2_start: \n" " eor v8.16b, v8.16b, v8.16b \n" " eor v9.16b, v9.16b, v9.16b \n" " eor v10.16b, v10.16b, v10.16b\n" " eor v11.16b, v11.16b, v11.16b\n" " mov x8, %0 // PanelA\n" " cmp %w4, #0 \n" " beq 1f // k <= 7\n" " mov w19, %w4\n" " cmp %w3, #0 \n" " beq 2f // loop number is even \n" " // start loopmd1_kd8_nd2 \n" " subs w19, w19, #1 \n" " ld1 {v4.8b, v5.8b}, [%1], #16 // load two lines of B\n" " ld1 {v2.8b}, [%0], #8 // load two lines of PanelA\n" " smull v0.8h, v4.8b, v2.8b \n" " saddlp v8.4s, v0.8h \n" " smull v0.8h, v5.8b, v2.8b \n" " saddlp v9.4s, v0.8h \n" " cmp w19, #0 \n" " beq 3f \n" " 2: \n" " ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32\n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " smull v0.8h, v2.8b, v4.8b \n" " smlal v0.8h, v3.8b, v6.8b \n" " sadalp v8.4s, v0.8h \n" " smull v1.8h, v2.8b, v5.8b \n" " smlal v1.8h, v3.8b, v7.8b \n" " sadalp v9.4s, v1.8h \n" " subs w19, w19, #2 \n" " bne 2b \n" " 3: \n" " addp v8.4s, v8.4s, v9.4s \n" " addp v8.4s, v8.4s, v8.4s \n" " // start process kd4 kd2 kd1 cases \n" " 1: \n" " cmp %w5, 0 \n" " beq 4f \n" " // start subkernel_m1n2k4 \n" " ld1 {v4.8b}, [%1], #8 // load B4x2\n" " sxtl v4.8h, v4.8b \n" " mov v6.d[0], v4.d[1] \n" " ld1 {v2.8b}, [%0] // load A1x4\n" " add %0, %0, #4 \n" " sxtl v2.8h, v2.8b \n" " smull v9.4s, v2.4h, v4.4h \n" " smull v10.4s, v2.4h, v6.4h \n" " addp v9.4s, v9.4s, v10.4s \n" " addp v9.4s, v9.4s, v9.4s \n" " add v8.4s, v8.4s, v9.4s \n" " 4: \n" " cmp %w6, 0 \n" " beq 5f \n" " // start subkernel_m1n2k2 \n" " ld1 {v4.8b}, [%0] // load A1x2\n" " add %0, %0, #2 \n" " ld1 {v0.8b}, [%1] // load B2x2\n" " add %1, %1, #4 \n" " mov v4.h[1], v4.h[0] \n" " 
smull v0.8h, v4.8b, v0.8b \n" " saddlp v0.4s, v0.8h \n" " add v8.4s, v8.4s, v0.4s \n" " 5: \n" " cmp %w7, 0 \n" " beq 6f \n" " // start subkernel_m1n2k1 \n" " ld1 {v4.8b}, [%1] // load B1x2\n" " add %1, %1, #2 \n" " ld1 {v2.8b}, [%0] // load A1x1\n" " add %0, %0, #2 \n" " sxtl v4.8h, v4.8b \n" " sxtl v2.8h, v2.8b \n" " smlal v8.4s, v4.4h, v2.h[0]\n" " 6: \n" " cmp %9, #0 \n" " beq 7f \n" " // v12: s0 s1 \n" " ldr w24, [%9] \n" " mov v12.s[0], w24 \n" " mov v12.s[1], v12.s[0] \n" " // int32 => fp32 \n" " scvtf v8.2s, v8.2s \n" " // fp32 *= scale_tm \n" " fmul v8.2s, v8.2s, v12.2s \n" " cmp %10, #0 \n" " beq 8f \n" " // fp32 += bias_tm \n" " ldr w24, [%10] \n" " mov v12.s[0], w24 \n" " mov v12.s[1], v12.s[0] \n" " fadd v8.2s, v8.2s, v12.2s \n" " 8:\n" " // fp32 -> int32 \n" " fcvtas v8.2s, v8.2s\n" " // int32 -> int16 \n" " sqxtn v8.4h, v8.4s \n" " // int16 -> int8 \n" " sqxtn v8.8b, v8.8h \n" " // save \n" " st1 {v8.h}[0], [%2]\n" " add %2, %2, #2 \n" " b m1_loopnd2_finish\n" " 7: \n" " st1 {v8.2s}, [%2], #8 \n" " m1_loopnd2_finish: \n" " mov %0, x8 \n" : "=r"(pa), // %0 "=r"(pb), // %1 "=r"(pc), // %2 "=r"(k8_even),// %3 "=r"(k8), // %4 "=r"(k4), // %5 "=r"(k2), // %6 "=r"(k1), // %7 "=r"(n4), // %8 "=r"(scales), // %9 "=r"(bias) // %10 : "0"(pa), "1"(pb), "2"(pc), "3"(k8_even), "4"(k8), "5"(k4), "6"(k2), "7"(k1), "8"(n4), "9"(scales), "10"(bias) : "cc", "memory", "x8", "w19", "w24", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } if (n1 > 0) { asm volatile ( "m1_nd1_start: \n" " eor v8.16b, v8.16b, v8.16b \n" " eor v9.16b, v9.16b, v9.16b \n" " eor v10.16b, v10.16b, v10.16b\n" " eor v11.16b, v11.16b, v11.16b\n" " cmp %w4, #0 \n" " beq 1f // k <= 7 \n" " mov w19, %w4\n" " cmp %w3, #0 \n" " beq 2f // loop number is even \n" " // start loopkd8_nd1 \n" " subs w19, w19, #1 \n" " ld1 {v4.8b}, [%1], #8 // 
load B line \n" " ld1 {v2.8b}, [%0], #8 // load A line \n" " smull v0.8h, v4.8b, v2.8b \n" " saddlp v8.4s, v0.8h \n" " cmp w19, #0 \n" " beq 3f \n" " 2: \n" " ld1 {v4.8b, v5.8b}, [%1], #16 \n" " ld1 {v24.8b, v25.8b}, [%0], #16\n" " smull v0.8h, v24.8b, v4.8b \n" " smlal v0.8h, v25.8b, v5.8b \n" " sadalp v8.4s, v0.8h \n" " subs w19, w19, #2 \n" " bne 2b \n" " 3: \n" " addp v8.4s, v8.4s, v8.4s \n" " addp v8.4s, v8.4s, v8.4s \n" " // start process kd4 kd2 kd1 cases\n" " 1: \n" " cmp %w5, 0 \n" " beq 4f \n" " // start subkernel_m1n1k4 \n" " ld1 {v4.8b}, [%1] // load B4x1\n" " add %1, %1, #4 \n" " sxtl v4.8h, v4.8b // extend B4x1 to v4\n" " ld1 {v2.8b}, [%0] // load A1x4\n" " add %0, %0, #4 \n" " sxtl v2.8h, v2.8b \n" " smull v9.4s, v2.4h, v4.4h \n" " addp v9.4s, v9.4s, v9.4s \n" " addp v9.4s, v9.4s, v9.4s \n" " add v8.4s, v8.4s, v9.4s \n" " 4: \n" " cmp %w6, 0 \n" " beq 5f \n" " // start subkernel_m1n1k2 \n" " ld1 {v4.8b}, [%0] // load A1x2\n" " add %0, %0, #2 \n" " ld1 {v0.8b}, [%1] // load B2x1\n" " add %1, %1, #2 \n" " smull v0.8h, v0.8b, v4.8b \n" " saddlp v0.4s, v0.8h \n" " add v8.4s, v8.4s, v0.4s \n" " 5: \n" " cmp %w7, 0 \n" " beq 6f \n" " // start subkernel_m1n1k1 \n" " ld1 {v0.8b}, [%1] // load B1x1 \n" " add %1, %1, #1 \n" " ld1 {v1.8b}, [%0] // load A1x1 \n" " add %0, %0, #1 \n" " sxtl v1.8h, v1.8b \n" " sxtl v0.8h, v0.8b \n" " smull v0.4s, v1.4h, v0.h[0] \n" " add v8.4s, v8.4s, v0.4s \n" " 6: \n" " cmp %9, #0 \n" " beq 7f \n" " // int32 => fp32 \n" " scvtf v8.2s, v8.2s \n" " // fp32 *= scale_tm\n" " ldr w24, [%9] \n" " mov v12.s[0], w24 \n" " fmul v8.2s, v8.2s, v12.2s \n" " cmp %10, #0 \n" " beq 8f \n" " // fp32 += bias_tm \n" " ldr w24, [%10] \n" " mov v12.s[0], w24 \n" " fadd v8.2s, v8.2s, v12.2s \n" " 8: \n" " // fp32 -> int32 \n" " fcvtas v8.2s, v8.2s\n" " // int32 -> int16 \n" " sqxtn v8.4h, v8.4s \n" " // int16 -> int8 \n" " sqxtn v8.8b, v8.8h \n" " // save \n" " st1 {v8.b}[0], [%2]\n" " b m1_finish \n" " 7: \n" " st1 {v8.s}[0], [%2] \n" " m1_finish: 
\n" " mov x0, #0 \n" : "=r"(pa), // %0 "=r"(pb), // %1 "=r"(pc), // %2 "=r"(k8_even),// %3 "=r"(k8), // %4 "=r"(k4), // %5 "=r"(k2), // %6 "=r"(k1), // %7 "=r"(n4), // %8 "=r"(scales), // %9 "=r"(bias) // %10 : "0"(pa), "1"(pb), "2"(pc), "3"(k8_even), "4"(k8), "5"(k4), "6"(k2), "7"(k1), "8"(n4), "9"(scales), "10"(bias) : "cc", "memory", "x0", "x8", "w19", "w24", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } } void int8kernel_m2(void* dst, int8_t* sa, int8_t* sb, int, int k, int n, int ldc, float* scales, float* bias) { void *pc0, *pc1; if (scales == nullptr) { pc0 = (int32_t*)dst; pc1 = ((int32_t*)pc0) + ldc; } else { pc0 = dst; pc1 = ((int8_t*)pc0) + ldc; } int8_t *pa = sa; int8_t *pb = sb; DECOMPOSE_K DECOMPOSE_N if (n4 > 0) { asm volatile( "9: \n" " eor v8.16b, v8.16b, v8.16b \n" " eor v9.16b, v9.16b, v9.16b \n" " eor v10.16b, v10.16b, v10.16b \n" " eor v11.16b, v11.16b, v11.16b \n" " eor v12.16b, v12.16b, v12.16b \n" " eor v13.16b, v13.16b, v13.16b \n" " eor v14.16b, v14.16b, v14.16b \n" " eor v15.16b, v15.16b, v15.16b \n" " eor v16.16b, v16.16b, v16.16b \n" " eor v17.16b, v17.16b, v17.16b \n" " eor v18.16b, v18.16b, v18.16b \n" " eor v19.16b, v19.16b, v19.16b \n" " eor v20.16b, v20.16b, v20.16b \n" " eor v21.16b, v21.16b, v21.16b \n" " eor v22.16b, v22.16b, v22.16b \n" " eor v23.16b, v23.16b, v23.16b \n" " mov x8, %0 // PanelA \n" " cmp %w5, #0 \n" " beq 1f \n" " mov w17, %w5 \n" " cmp %w4, #0 \n" " beq 2f // loop number is even \n" " // start loopm2_kd8_nd4\n" " subs w17, w17, #1 \n" " ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 // load four lines of B\n" " ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA \n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v8.4s, v0.8h \n" " saddlp v12.4s, v1.8h \n" " smull v0.8h, v5.8b, v2.8b \n" " smull v1.8h, v5.8b, 
v3.8b \n" " saddlp v9.4s, v0.8h \n" " saddlp v13.4s, v1.8h \n" " smull v0.8h, v6.8b, v2.8b \n" " smull v1.8h, v6.8b, v3.8b \n" " saddlp v10.4s, v0.8h \n" " saddlp v14.4s, v1.8h \n" " smull v0.8h, v7.8b, v2.8b \n" " smull v1.8h, v7.8b, v3.8b \n" " saddlp v11.4s, v0.8h \n" " saddlp v15.4s, v1.8h \n" " cmp w17, #0 \n" " beq 3f \n" " 2: \n" " add x12, %1, #32 \n" " ld1 {v4.8b, v5.8b}, [%1], #16 \n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v5.8b, v2.8b \n" " ld1 {v6.8b, v7.8b}, [x12], #16 \n" " ld1 {v24.8b, v25.8b}, [%0], #16\n" " smlal v0.8h, v6.8b, v24.8b \n" " smlal v1.8h, v7.8b, v24.8b \n" " sadalp v8.4s, v0.8h\n" " sadalp v9.4s, v1.8h\n" " smull v0.8h, v4.8b, v3.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " smlal v0.8h, v6.8b, v25.8b \n" " smlal v1.8h, v7.8b, v25.8b \n" " sadalp v12.4s, v0.8h\n" " sadalp v13.4s, v1.8h\n" " // start v10v11, v14v15, v18v19, v22v23, error here!\n" " ld1 {v4.8b, v5.8b}, [%1], #16 \n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v5.8b, v2.8b \n" " ld1 {v6.8b, v7.8b}, [x12], #16 \n" " smlal v0.8h, v6.8b, v24.8b \n" " smlal v1.8h, v7.8b, v24.8b \n" " sadalp v10.4s, v0.8h \n" " sadalp v11.4s, v1.8h \n" " smull v0.8h, v4.8b, v3.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " smlal v0.8h, v6.8b, v25.8b \n" " smlal v1.8h, v7.8b, v25.8b \n" " sadalp v14.4s, v0.8h \n" " sadalp v15.4s, v1.8h \n" " add %1, %1, #32 \n" " subs w17, w17, #2 \n" " bne 2b \n" " 3: \n" " addp v8.4s, v8.4s, v9.4s \n" " addp v10.4s, v10.4s, v11.4s\n" " addp v12.4s, v12.4s, v13.4s\n" " addp v14.4s, v14.4s, v15.4s\n" " addp v8.4s, v8.4s, v10.4s \n" " addp v9.4s, v12.4s, v14.4s \n" " // start process kd4 kd2 kd1 cases \n" " 1: \n" " cmp %w6, #0 \n" " beq 4f \n" " // start subkernel_m2n4k4 \n" " ld1 {v4.8b, v5.8b}, [%1], #16 // load B4x4\n" " sxtl v4.8h, v4.8b \n" " sxtl v5.8h, v5.8b \n" " mov v6.d[0], v4.d[1] \n" " mov v7.d[0], v5.d[1] \n" " ld1 {v2.8b}, [%0], #8 // load A2x4\n" " sxtl v2.8h, v2.8b \n" " mov v3.d[0], v2.d[1] \n" " smull 
v12.4s, v2.4h, v4.4h \n" " smull v13.4s, v2.4h, v6.4h \n" " smull v14.4s, v2.4h, v5.4h \n" " smull v15.4s, v2.4h, v7.4h \n" " addp v12.4s, v12.4s, v13.4s\n" " addp v14.4s, v14.4s, v15.4s\n" " addp v12.4s, v12.4s, v14.4s\n" " add v8.4s, v8.4s, v12.4s \n" " smull v16.4s, v3.4h, v4.4h \n" " smull v17.4s, v3.4h, v6.4h \n" " smull v18.4s, v3.4h, v5.4h \n" " smull v19.4s, v3.4h, v7.4h \n" " addp v16.4s, v16.4s, v17.4s\n" " addp v18.4s, v18.4s, v19.4s\n" " addp v16.4s, v16.4s, v18.4s\n" " add v9.4s, v9.4s, v16.4s \n" " 4: \n" " cmp %w7, #0 \n" " beq 5f \n" " // start subkernel_m2n4k2 \n" " ld1 {v4.8b}, [%0] // load A2x2 \n" " add %0, %0, #4 \n" " ld1 {v0.8b}, [%1], #8 // load B2x4 \n" " // 00 11 22 33 \n" " rev32 v1.4h, v0.4h // 11 00 33 22 \n" " rev64 v2.2s, v0.2s // 22 33 00 11 \n" " rev64 v3.4h, v0.4h // 33 22 11 00 \n" " smull v12.8h, v4.8b, v0.8b \n" " smull v13.8h, v4.8b, v1.8b \n" " smull v14.8h, v4.8b, v2.8b \n" " smull v15.8h, v4.8b, v3.8b \n" " saddlp v12.4s, v12.8h \n" " saddlp v13.4s, v13.8h \n" " saddlp v14.4s, v14.8h \n" " saddlp v15.4s, v15.8h \n" " mov v16.s[0], v12.s[0] \n" " mov v16.s[1], v13.s[0] \n" " mov v16.s[2], v14.s[0] \n" " mov v16.s[3], v15.s[0] \n" " mov v17.s[0], v13.s[1] \n" " mov v17.s[1], v12.s[1] \n" " mov v17.s[2], v15.s[1] \n" " mov v17.s[3], v14.s[1] \n" " add v8.4s, v8.4s, v16.4s \n" " add v9.4s, v9.4s, v17.4s \n" " 5: \n" " cmp %w8, #0 \n" " beq 6f \n" " // start subkernel_m2n4k1 \n" " ld1 {v4.8b}, [%1] // load B1x4\n" " add %1, %1, #4 \n" " ld1 {v2.8b}, [%0] // load A2x1\n" " add %0, %0, #2 \n" " sxtl v4.8h, v4.8b \n" " sxtl v2.8h, v2.8b \n" " smlal v8.4s, v4.4h, v2.h[0]\n" " smlal v9.4s, v4.4h, v2.h[1]\n" " 6: \n" " cmp %10, #0 \n" " beq 7f \n" " ld1 {v12.2s}, [%10] \n" " // int32 => fp32 \n" " scvtf v8.4s, v8.4s \n" " scvtf v9.4s, v9.4s \n" " // fp32 *= scale_tm \n" " fmul v8.4s, v8.4s, v12.s[0]\n" " fmul v9.4s, v9.4s, v12.s[1]\n" " cmp %11, #0 \n" " beq 8f \n" " // fp32 += scales_tm \n" " ld1 {v14.2s}, [%11] \n" " dup v15.4s, 
v14.s[0] \n" " fadd v8.4s, v8.4s, v15.4s \n" " dup v15.4s, v14.s[1] \n" " fadd v9.4s, v9.4s, v15.4s \n" " 8: \n" " // fp32 -> int32 \n" " fcvtas v8.4s, v8.4s\n" " fcvtas v9.4s, v9.4s\n" " // int32 -> int16 \n" " sqxtn v6.4h, v8.4s \n" " sqxtn2 v6.8h, v9.4s\n" " // int16 -> int8 \n" " sqxtn v8.8b, v6.8h \n" " // save \n" " st1 {v8.s}[0], [%2] \n" " add %2, %2, #4 \n" " st1 {v8.s}[1], [%3] \n" " add %3, %3, #4 \n" " b m2_loopnd4_finish \n" " 7: \n" " st1 {v8.4s}, [%2], #16 \n" " st1 {v9.4s}, [%3], #16 \n" " m2_loopnd4_finish: \n" " subs %w9, %w9, #1 \n" " mov %0, x8 \n" " bne 9b \n" : "=r"(pa), // %0 "=r"(pb), // %1 "=r"(pc0), // %2 "=r"(pc1), // %3 "=r"(k8_even),// %4 "=r"(k8), // %5 "=r"(k4), // %6 "=r"(k2), // %7 "=r"(k1), // %8 "=r"(n4), // %9 "=r"(scales), // %10 "=r"(bias) // %11 : "0"(pa), "1"(pb), "2"(pc0), "3"(pc1), "4"(k8_even), "5"(k8), "6"(k4), "7"(k2), "8"(k1), "9"(n4), "10"(scales), "11"(bias) : "cc", "memory", "x8", "w17", "x12", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } if (n2 > 0) { asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "m2_nd2_start: \n" " mov x8, %0 // PanelA \n" " cmp %w5, #0 \n" " beq 1f \n" " mov w17, %w5 \n" " cmp %w4, #0 \n" " beq 2f // loop number is even \n" " // start loopmd2_kd8_nd2 \n" " subs w17, w17, #1 \n" " ld1 {v4.8b, v5.8b}, [%1], #16 // 
load two lines of B\n" " ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v8.4s, v0.8h \n" " saddlp v12.4s, v1.8h \n" " smull v0.8h, v5.8b, v2.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " saddlp v9.4s, v0.8h \n" " saddlp v13.4s, v1.8h \n" " cmp w17, #0 \n" " beq 3f \n" " 2: \n" " ld1 {v4.8b, v5.8b}, [%1], #16 \n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " smull v0.8h, v4.8b, v2.8b \n" " ld1 {v6.8b, v7.8b}, [%1], #16 \n" " smull v1.8h, v5.8b, v2.8b \n" " ld1 {v24.8b, v25.8b}, [%0], #16\n" " smlal v0.8h, v6.8b, v24.8b \n" " smlal v1.8h, v7.8b, v24.8b \n" " sadalp v8.4s, v0.8h\n" " sadalp v9.4s, v1.8h\n" " smull v0.8h, v4.8b, v3.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " smlal v0.8h, v6.8b, v25.8b \n" " smlal v1.8h, v7.8b, v25.8b \n" " sadalp v12.4s, v0.8h \n" " sadalp v13.4s, v1.8h \n" " subs w17, w17, #2 \n" " bne 2b \n" " 3: \n" " addp v8.4s, v8.4s, v9.4s \n" " addp v12.4s, v12.4s, v13.4s\n" " addp v8.4s, v8.4s, v8.4s \n" " addp v12.4s, v12.4s, v12.4s\n" " // start process kd4 kd2 kd1 cases\n" " 1: \n" " cmp %w6, #0 \n" " beq 4f \n" " // start subkernel_m2n2k4 \n" " ld1 {v4.8b}, [%1], #8 // load B4x2\n" " sxtl v4.8h, v4.8b \n" " mov v6.d[0], v4.d[1] \n" " ld1 {v2.8b}, [%0], #8 // load first A2x4\n" " sxtl v2.8h, v2.8b \n" " mov v3.d[0], v2.d[1] \n" " smull v9.4s, v2.4h, v4.4h \n" " smull v10.4s, v2.4h, v6.4h \n" " addp v9.4s, v9.4s, v10.4s \n" " addp v9.4s, v9.4s, v9.4s \n" " add v8.4s, v8.4s, v9.4s \n" " smull v13.4s, v3.4h, v4.4h \n" " smull v14.4s, v3.4h, v6.4h \n" " addp v13.4s, v13.4s, v14.4s\n" " addp v13.4s, v13.4s, v13.4s\n" " add v12.4s, v12.4s, v13.4s \n" " 4: \n" " cmp %w7, 0 \n" " beq 5f \n" " // start subkernel_m2n2k2 \n" " ld1 {v4.8b}, [%0] // load A2x2\n" " add %0, %0, #4 \n" " ld1 {v0.8b}, [%1] // load B2x2\n" " add %1, %1, #4 \n" " // 00 11\n" " rev32 v1.4h, v0.4h // 11 00\n" " smull v21.8h, v4.8b, v0.8b \n" " smull v22.8h, v4.8b, v1.8b \n" " saddlp v21.4s, v21.8h \n" " 
saddlp v22.4s, v22.8h \n" " mov v9.s[0], v21.s[0] \n" " mov v9.s[1], v22.s[0] \n" " add v8.4s, v8.4s, v9.4s \n" " mov v13.s[0], v22.s[1] \n" " mov v13.s[1], v21.s[1] \n" " add v12.4s, v12.4s, v13.4s \n" " 5: \n" " cmp %w8, #0 \n" " beq 6f \n" " // start subkernel_m2n2k1 \n" " ld1 {v4.8b}, [%1] // load B1x2\n" " add %1, %1, #2 \n" " ld1 {v2.8b}, [%0] // load A4x1\n" " add %0, %0, #2 \n" " sxtl v4.8h, v4.8b \n" " sxtl v2.8h, v2.8b \n" " smlal v8.4s, v4.4h, v2.h[0]\n" " smlal v12.4s, v4.4h, v2.h[1] \n" " 6: \n" " cmp %9, #0 \n" " beq 7f \n" " mov v8.d[1], v12.d[0] \n" " // v12: 0 1 \n" " ld1 {v12.2s}, [%9] \n" " zip1 v12.4s, v12.4s, v12.4s\n" " // v12: 0 0 1 1 \n" " // int32 => fp32 \n" " scvtf v8.4s, v8.4s \n" " // fp32 *= scale_tm \n" " fmul v8.4s, v8.4s, v12.4s \n" " cmp %10, #0 \n" " beq 8f \n" " // fp32 += bias_tm \n" " ld1 {v12.2s}, [%10] \n" " zip1 v12.4s, v12.4s, v12.4s\n" " fadd v8.4s, v8.4s, v12.4s \n" " 8: \n" " // fp32 -> int32 \n" " fcvtas v8.4s, v8.4s \n" " // int32 -> int16 \n" " sqxtn v8.4h, v8.4s \n" " // int16 -> int8 \n" " sqxtn v8.8b, v8.8h \n" " // save \n" " st1 {v8.h}[0], [%2] \n" " add %2, %2, #2 \n" " st1 {v8.h}[1], [%3] \n" " add %3, %3, #2 \n" " b m2_loopnd2_finish \n" " 7:" " st1 {v8.2s}, [%2], #8 \n" " st1 {v12.2s}, [%3], #8 \n" " m2_loopnd2_finish: \n" " mov %0, x8 \n" : "=r"(pa), // %0 "=r"(pb), // %1 "=r"(pc0), // %2 "=r"(pc1), // %3 "=r"(k8_even),// %4 "=r"(k8), // %5 "=r"(k4), // %6 "=r"(k2), // %7 "=r"(k1), // %8 "=r"(scales), // %9 "=r"(bias) // %10 : "0"(pa), "1"(pb), "2"(pc0), "3"(pc1), "4"(k8_even), "5"(k8), "6"(k4), "7"(k2), "8"(k1), "9"(scales), "10"(bias) : "cc", "memory", "x8", "x12", "w17", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } if (n1 > 0) { asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b 
\n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "m2_nd1_start: \n" " cmp %w5, #0 \n" " beq 1f // k <=7\n" " mov w17, %w5\n" " cmp %w4, #0 \n" " beq 2f // loop number is even \n" " // start loopkd8_nd1 \n" " subs w17, w17, #1 \n" " ld1 {v4.8b}, [%1], #8 // load four lines of B\n" " ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v8.4s, v0.8h \n" " saddlp v12.4s, v1.8h \n" " cmp w17, #0 \n" " beq 3f \n" " 2: \n" " ld1 {v4.8b, v5.8b}, [%1], #16 \n" " ld1 {v24.8b, v25.8b, v26.8b, v27.8b}, [%0], #32\n" " smull v0.8h, v24.8b, v4.8b \n" " smlal v0.8h, v26.8b, v5.8b \n" " sadalp v8.4s, v0.8h \n" " smull v1.8h, v25.8b, v4.8b \n" " smlal v1.8h, v27.8b, v5.8b \n" " sadalp v12.4s, v1.8h \n" " subs w17, w17, #2 \n" " bne 2b \n" " 3: \n" " addp v8.4s, v8.4s, v8.4s \n" " addp v8.4s, v8.4s, v8.4s \n" " addp v12.4s, v12.4s, v12.4s\n" " addp v12.4s, v12.4s, v12.4s\n" " // start process kd4 kd2 kd1 cases\n" " 1: \n" " cmp %w6, #0 \n" " beq 4f \n" " // start subkernel_m2n1k2 \n" " ld1 {v4.8b}, [%1] // load B4x1\n" " add %1, %1, #4 \n" " sxtl v4.8h, v4.8b // extend B4x1 to v4\n" " ld1 {v2.8b}, [%0], #8 // load A2x4 \n" " sxtl v2.8h, v2.8b \n" " mov v5.d[0], v2.d[1] \n" " smull v9.4s, v2.4h, v4.4h \n" " addp v9.4s, v9.4s, v9.4s \n" " addp v9.4s, v9.4s, v9.4s \n" " add v8.4s, v8.4s, v9.4s \n" " smull v13.4s, v5.4h, v4.4h \n" " addp v13.4s, v13.4s, v13.4s\n" " addp v13.4s, v13.4s, v13.4s\n" " add v12.4s, v12.4s, v13.4s \n" " 4: \n" " cmp %w7, 0 \n" " beq 5f \n" " // start subkernel_m2n1k2 \n" " ld1 
{v4.8b}, [%0] // load A2x2\n" " add %0, %0, #4 \n" " ld1 {v0.8b}, [%1] // load B2x1\n" " add %1, %1, #2 \n" " mov v0.h[1], v0.h[0] \n" " smull v0.8h, v0.8b, v4.8b \n" " saddlp v0.4s, v0.8h \n" " mov v9.s[0], v0.s[0] \n" " add v8.4s, v8.4s, v9.4s \n" " mov v13.s[0], v0.s[1] \n" " add v12.4s, v12.4s, v13.4s \n" " 5: \n" " cmp %w8, 0 \n" " beq 6f \n" " // start subkernel_m2n1k1 \n" " ld1 {v0.8b}, [%1] // load B1x1\n" " add %1, %1, #1 \n" " ld1 {v1.8b}, [%0] // load A2x1\n" " add %0, %0, #2 \n" " sxtl v1.8h, v1.8b \n" " sxtl v0.8h, v0.8b \n" " smull v0.4s, v1.4h, v0.h[0]\n" " mov v1.s[0], v0.s[1] \n" " add v8.4s, v8.4s, v0.4s \n" " add v12.4s, v12.4s, v1.4s \n" " 6: \n" " cmp %w9, #0 \n" " beq 7f \n" " mov v8.s[1], v12.s[0] \n" " // v12: s0 s1 \n" " ld1 {v12.2s}, [%9] \n" " // int32 => fp32 \n" " scvtf v8.2s, v8.2s \n" " // fp32 *= scale_tm \n" " fmul v8.2s, v8.2s, v12.2s \n" " cmp %10, #0 \n" " beq 8f \n" " // fp32 += bias_tm \n" " ld1 {v12.2s}, [%10] \n" " fadd v8.2s, v8.2s, v12.2s \n" " 8: \n" " // fp32 -> int32 \n" " fcvtas v8.2s, v8.2s \n" " // int32 -> int16 \n" " sqxtn v8.4h, v8.4s \n" " // int16 -> int8 \n" " sqxtn v8.8b, v8.8h \n" " // save \n" " st1 {v8.b}[0], [%2] \n" " st1 {v8.b}[1], [%3] \n" " b m2_finish \n" " 7: \n" " st1 {v8.s}[0], [%2] \n" " st1 {v12.s}[0], [%3] \n" " m2_finish: \n" " mov x0, #0 \n" : "=r"(pa), // %0 "=r"(pb), // %1 "=r"(pc0), // %2 "=r"(pc1), // %3 "=r"(k8_even),// %4 "=r"(k8), // %5 "=r"(k4), // %6 "=r"(k2), // %7 "=r"(k1), // %8 "=r"(scales), // %9 "=r"(bias) // %10 : "0"(pa), "1"(pb), "2"(pc0), "3"(pc1), "4"(k8_even), "5"(k8), "6"(k4), "7"(k2), "8"(k1), "9"(scales), "10"(bias) : "cc", "memory", "x0", "x8", "w17", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } } void int8kernel_m4(void* dst, int8_t* sa, int8_t* sb, int, int k, int n, int ldc, float* scales, float* 
bias) { void *pc0, *pc1, *pc2, *pc3; if (scales == nullptr) { pc0 = (int32_t*)dst; pc1 = ((int32_t*)pc0) + ldc; pc2 = ((int32_t*)pc1) + ldc; pc3 = ((int32_t*)pc2) + ldc; } else { pc0 = dst; pc1 = ((int8_t*)pc0) + ldc; pc2 = ((int8_t*)pc1) + ldc; pc3 = ((int8_t*)pc2) + ldc; } int8_t *pa = sa; int8_t *pb = sb; DECOMPOSE_K DECOMPOSE_N if (n4 > 0) { asm volatile( "8: \n" " eor v8.8b, v8.8b, v8.8b \n" " eor v9.8b, v9.8b, v9.8b \n" " eor v10.8b, v10.8b, v10.8b \n" " eor v11.8b, v11.8b, v11.8b \n" " eor v12.8b, v12.8b, v12.8b \n" " eor v13.8b, v13.8b, v13.8b \n" " eor v14.8b, v14.8b, v14.8b \n" " eor v15.8b, v15.8b, v15.8b \n" " eor v16.8b, v16.8b, v16.8b \n" " eor v17.8b, v17.8b, v17.8b \n" " eor v18.8b, v18.8b, v18.8b \n" " eor v19.8b, v19.8b, v19.8b \n" " eor v20.8b, v20.8b, v20.8b \n" " eor v21.8b, v21.8b, v21.8b \n" " eor v22.8b, v22.8b, v22.8b \n" " eor v23.8b, v23.8b, v23.8b \n" " mov x8, %0 \n" " cmp %w7, #0 \n" " beq 1f \n" " mov w20, %w7 \n" " cmp %w6, #0 \n" " beq 2f \n" " subs w20, w20, #1 \n" " ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 \n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v8.4s, v0.8h \n" " saddlp v12.4s, v1.8h \n" " smull v0.8h, v5.8b, v2.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " saddlp v9.4s, v0.8h \n" " saddlp v13.4s, v1.8h \n" " smull v0.8h, v6.8b, v2.8b \n" " smull v1.8h, v6.8b, v3.8b \n" " saddlp v10.4s, v0.8h \n" " saddlp v14.4s, v1.8h \n" " smull v0.8h, v7.8b, v2.8b \n" " smull v1.8h, v7.8b, v3.8b \n" " saddlp v11.4s, v0.8h \n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " saddlp v15.4s, v1.8h \n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v16.4s, v0.8h \n" " saddlp v20.4s, v1.8h \n" " smull v0.8h, v5.8b, v2.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " saddlp v17.4s, v0.8h \n" " saddlp v21.4s, v1.8h \n" " smull v0.8h, v6.8b, v2.8b \n" " smull v1.8h, v6.8b, v3.8b \n" " saddlp v18.4s, v0.8h \n" " saddlp v22.4s, v1.8h \n" " smull v0.8h, v7.8b, v2.8b \n" " smull 
v1.8h, v7.8b, v3.8b \n" " saddlp v19.4s, v0.8h \n" " saddlp v23.4s, v1.8h \n" " cmp w20, #0 \n" " beq 3f \n" " 2: \n" " add x15, %x1, #32 \n" " add x14, %x0, #32 \n" " ld1 {v4.8b, v5.8b}, [%1], #16\n" " ld1 {v2.8b, v3.8b}, [%0], #16\n" " smull v0.8h, v4.8b, v2.8b \n" " ld1 {v6.8b, v7.8b}, [x15], #16 \n" " smull v1.8h, v5.8b, v2.8b \n" " ld1 {v24.8b, v25.8b}, [x14], #16\n" " smlal v0.8h, v6.8b, v24.8b\n" " smlal v1.8h, v7.8b, v24.8b\n" " sadalp v8.4s, v0.8h\n" " sadalp v9.4s, v1.8h\n" " smull v0.8h, v4.8b, v3.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " smlal v0.8h, v6.8b, v25.8b \n" " smlal v1.8h, v7.8b, v25.8b \n" " sadalp v12.4s, v0.8h\n" " sadalp v13.4s, v1.8h\n" " // finish v8v9 v12v13, start proc v16v17,v20v21\n" " ld1 {v28.8b, v29.8b}, [%0], #16 \n" " smull v0.8h, v4.8b, v28.8b \n" " smull v1.8h, v5.8b, v28.8b \n" " ld1 {v26.8b, v27.8b}, [x14], #16\n" " smlal v0.8h, v6.8b, v26.8b \n" " smlal v1.8h, v7.8b, v26.8b \n" " sadalp v16.4s, v0.8h \n" " sadalp v17.4s, v1.8h \n" " smull v0.8h, v4.8b, v29.8b \n" " smull v1.8h, v5.8b, v29.8b \n" " smlal v0.8h, v6.8b, v27.8b \n" " smlal v1.8h, v7.8b, v27.8b \n" " sadalp v20.4s, v0.8h \n" " sadalp v21.4s, v1.8h \n" " // start v10v11, v14v15, v18v19, v22v23\n" " ld1 {v4.8b, v5.8b}, [%1], #16 \n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v5.8b, v2.8b \n" " ld1 {v6.8b, v7.8b}, [x15], #16 \n" " smlal v0.8h, v6.8b, v24.8b \n" " smlal v1.8h, v7.8b, v24.8b \n" " sadalp v10.4s, v0.8h \n" " sadalp v11.4s, v1.8h \n" " smull v0.8h, v4.8b, v3.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " smlal v0.8h, v6.8b, v25.8b \n" " smlal v1.8h, v7.8b, v25.8b \n" " sadalp v14.4s, v0.8h \n" " sadalp v15.4s, v1.8h \n" " smull v0.8h, v4.8b, v28.8b \n" " smull v1.8h, v5.8b, v28.8b \n" " smlal v0.8h, v6.8b, v26.8b \n" " smlal v1.8h, v7.8b, v26.8b \n" " sadalp v18.4s, v0.8h \n" " sadalp v19.4s, v1.8h \n" " smull v0.8h, v4.8b, v29.8b \n" " smull v1.8h, v5.8b, v29.8b \n" " smlal v0.8h, v6.8b, v27.8b \n" " smlal v1.8h, v7.8b, v27.8b \n" " sadalp v22.4s, v0.8h 
\n" " sadalp v23.4s, v1.8h \n" " add %0, %0, #32 \n" " add %1, %1, #32 \n" " subs w20, w20, #2 \n" " bne 2b \n" // start nd2 " 3: \n" " addp v8.4s, v8.4s, v9.4s \n" " addp v10.4s, v10.4s, v11.4s\n" " addp v12.4s, v12.4s, v13.4s\n" " addp v14.4s, v14.4s, v15.4s\n" " addp v16.4s, v16.4s, v17.4s\n" " addp v18.4s, v18.4s, v19.4s\n" " addp v20.4s, v20.4s, v21.4s\n" " addp v22.4s, v22.4s, v23.4s\n" " addp v8.4s, v8.4s, v10.4s \n" " addp v9.4s, v12.4s, v14.4s \n" " addp v10.4s, v16.4s, v18.4s\n" " addp v11.4s, v20.4s, v22.4s\n" " // start process kd4 kd2 kd1 cases\n" " 1: \n" " cmp %w8, #0 \n" " beq 4f \n" " // start subkernel_m4n4k4\n" " ld1 {v4.8b, v5.8b}, [%1], #16 // load B4x4\n" " sxtl v4.8h, v4.8b \n" " mov v6.d[0], v4.d[1] \n" " sxtl v5.8h, v5.8b \n" " mov v7.d[0], v5.d[1] \n" " ld1 {v2.8b}, [%0], #8 // load A2x4\n" " sxtl v2.8h, v2.8b \n" " mov v3.d[0], v2.d[1] \n" " smull v12.4s, v2.4h, v4.4h \n" " smull v13.4s, v2.4h, v6.4h \n" " smull v14.4s, v2.4h, v5.4h \n" " addp v12.4s, v12.4s, v13.4s\n" " smull v15.4s, v2.4h, v7.4h \n" " addp v14.4s, v14.4s, v15.4s\n" " addp v12.4s, v12.4s, v14.4s\n" " smull v16.4s, v3.4h, v4.4h \n" " add v8.4s, v8.4s, v12.4s \n" " smull v17.4s, v3.4h, v6.4h \n" " smull v18.4s, v3.4h, v5.4h \n" " addp v16.4s, v16.4s, v17.4s\n" " smull v19.4s, v3.4h, v7.4h \n" " addp v18.4s, v18.4s, v19.4s\n" " addp v16.4s, v16.4s, v18.4s\n" " add v9.4s, v9.4s, v16.4s \n" " ld1 {v2.8b}, [%0], #8 // load next A2x4\n" " sxtl v2.8h, v2.8b \n" " mov v3.d[0], v2.d[1] \n" " smull v12.4s, v2.4h, v4.4h \n" " smull v13.4s, v2.4h, v6.4h \n" " smull v14.4s, v2.4h, v5.4h \n" " addp v12.4s, v12.4s, v13.4s\n" " smull v15.4s, v2.4h, v7.4h \n" " addp v14.4s, v14.4s, v15.4s\n" " addp v12.4s, v12.4s, v14.4s\n" " smull v16.4s, v3.4h, v4.4h \n" " add v10.4s, v10.4s, v12.4s \n" " smull v17.4s, v3.4h, v6.4h \n" " smull v18.4s, v3.4h, v5.4h \n" " addp v16.4s, v16.4s, v17.4s\n" " smull v19.4s, v3.4h, v7.4h \n" " addp v18.4s, v18.4s, v19.4s\n" " addp v16.4s, v16.4s, v18.4s\n" " add 
v11.4s, v11.4s, v16.4s \n" " 4: \n" " cmp %w9, #0 \n" " beq 5f \n" " // start subkernel_m4n4k2 \n" " ld1 {v0.8b}, [%1], #8 // load B2x4 \n" " // 00 11 22 33 \n" " rev32 v1.4h, v0.4h // 11 00 33 22 \n" " rev64 v2.2s, v0.2s // 22 33 00 11 \n" " ld1 {v4.8b}, [%0], #8 // load A4x2 \n" " rev64 v3.4h, v0.4h // 33 22 11 00 \n" " smull v12.8h, v4.8b, v0.8b \n" " smull v13.8h, v4.8b, v1.8b \n" " saddlp v12.4s, v12.8h \n" " smull v14.8h, v4.8b, v2.8b \n" " saddlp v13.4s, v13.8h \n" " smull v15.8h, v4.8b, v3.8b \n" " saddlp v14.4s, v14.8h \n" " saddlp v15.4s, v15.8h \n" " mov v16.s[0], v12.s[0] \n" " mov v16.s[1], v13.s[0] \n" " mov v16.s[2], v14.s[0] \n" " mov v16.s[3], v15.s[0] \n" " mov v17.s[0], v13.s[1] \n" " mov v17.s[1], v12.s[1] \n" " mov v17.s[2], v15.s[1] \n" " mov v17.s[3], v14.s[1] \n" " mov v18.s[0], v14.s[2] \n" " mov v18.s[1], v15.s[2] \n" " mov v18.s[2], v12.s[2] \n" " mov v18.s[3], v13.s[2] \n" " mov v19.s[0], v15.s[3] \n" " mov v19.s[1], v14.s[3] \n" " mov v19.s[2], v13.s[3] \n" " mov v19.s[3], v12.s[3] \n" " add v8.4s, v8.4s, v16.4s \n" " add v9.4s, v9.4s, v17.4s \n" " add v10.4s, v10.4s, v18.4s \n" " add v11.4s, v11.4s, v19.4s \n" " 5: \n" " cmp %w10, #0 \n" " beq 6f \n" " // start subkernel_m4n4k1\n" " ld1 {v4.8b}, [%1] // load B1x4\n" " add %1, %1, #4 \n" " ld1 {v2.8b}, [%0] // load A4x1\n" " add %0, %0, #4 \n" " sxtl v4.8h, v4.8b \n" " sxtl v2.8h, v2.8b \n" " smlal v8.4s, v4.4h, v2.h[0] \n" " smlal v9.4s, v4.4h, v2.h[1] \n" " smlal v10.4s, v4.4h, v2.h[2] \n" " smlal v11.4s, v4.4h, v2.h[3] \n" " 6: \n" " cmp %12, #0 \n" " beq 9f \n" " ld1 {v12.4s}, [%12] \n" " // int32 => fp32 \n" " scvtf v8.4s, v8.4s \n" " scvtf v9.4s, v9.4s \n" " scvtf v10.4s, v10.4s \n" " scvtf v11.4s, v11.4s \n" " // fp32 *= scale_tm \n" " fmul v8.4s, v8.4s, v12.s[0] \n" " fmul v9.4s, v9.4s, v12.s[1] \n" " fmul v10.4s, v10.4s, v12.s[2] \n" " fmul v11.4s, v11.4s, v12.s[3] \n" " cmp %13, #0 \n" " beq 7f \n" " ld1 {v14.4s}, [%13] \n" " dup v15.4s, v14.s[0] \n" " fadd v8.4s, v8.4s, 
v15.4s \n" " dup v15.4s, v14.s[1] \n" " fadd v9.4s, v9.4s, v15.4s \n" " dup v15.4s, v14.s[2] \n" " fadd v10.4s, v10.4s, v15.4s\n" " dup v15.4s, v14.s[3] \n" " fadd v11.4s, v11.4s, v15.4s\n" " 7: \n" " // fp32 -> int32 \n" " fcvtas v8.4s, v8.4s \n" " fcvtas v9.4s, v9.4s \n" " fcvtas v10.4s, v10.4s \n" " fcvtas v11.4s, v11.4s \n" " // int32 -> int16 \n" " sqxtn v6.4h, v8.4s \n" " sqxtn2 v6.8h, v9.4s \n" " sqxtn v7.4h, v10.4s \n" " sqxtn2 v7.8h, v11.4s \n" " // int16 -> int8 \n" " sqxtn v8.8b, v6.8h \n" " sqxtn v9.8b, v7.8h \n" " // save \n" " st1 {v8.s}[0], [%2] \n" " add %x2, %x2, #4 \n" " st1 {v8.s}[1], [%3] \n" " add %x3, %x3, #4 \n" " st1 {v9.s}[0], [%4] \n" " add %x4, %x4, #4 \n" " st1 {v9.s}[1], [%5] \n" " add %x5, %x5, #4 \n" " b m4_loopnd4_finish \n" " 9: \n" " st1 {v8.4s}, [%x2], #16 \n" " st1 {v9.4s}, [%x3], #16 \n" " st1 {v10.4s}, [%x4], #16 \n" " st1 {v11.4s}, [%x5], #16 \n" " m4_loopnd4_finish: \n" " subs %x11, %x11, #1 \n" " mov %x0, x8 \n" " bne 8b \n" : "=r"(pa), // %0 "=r"(pb), // %1 "=r"(pc0), // %2 "=r"(pc1), // %3 "=r"(pc2), // %4 "=r"(pc3), // %5 "=r"(k8_even),// %6 "=r"(k8), // %7 "=r"(k4), // %8 "=r"(k2), // %9 "=r"(k1), // %10 "=r"(n4), // %11 "=r"(scales), // %12 "=r"(bias) // %13 : "0"(pa), "1"(pb), "2"(pc0), "3"(pc1), "4"(pc2), "5"(pc3), "6"(k8_even), "7"(k8), "8"(k4), "9"(k2), "10"(k1), "11"(n4), "12"(scales), "13"(bias) : "cc", "memory", "x8", "w20", "x14", "x15", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } if (n2 > 0) { asm volatile( " eor v8.8b, v8.8b, v8.8b \n" " eor v9.8b, v9.8b, v9.8b \n" " eor v10.8b, v10.8b, v10.8b \n" " eor v11.8b, v11.8b, v11.8b \n" " eor v12.8b, v12.8b, v12.8b \n" " eor v13.8b, v13.8b, v13.8b \n" " eor v14.8b, v14.8b, v14.8b \n" " eor v15.8b, v15.8b, v15.8b \n" " eor v16.8b, v16.8b, v16.8b \n" " eor v17.8b, v17.8b, v17.8b \n" " eor v18.8b, 
v18.8b, v18.8b \n" " eor v19.8b, v19.8b, v19.8b \n" " eor v20.8b, v20.8b, v20.8b \n" " eor v21.8b, v21.8b, v21.8b \n" " eor v22.8b, v22.8b, v22.8b \n" " eor v23.8b, v23.8b, v23.8b \n" "m4_nd2_start: \n" " mov x8, %x0 // PanelA \n" " cmp %w7, #0 \n" " beq 1f // k <= 7 \n" " mov w20, %w7 \n" " cmp %w6, #0 \n" " beq 2f// loop number is even \n" " // start loopkd8_nd2 \n" " subs w20, w20, #1 \n" " ld1 {v4.8b, v5.8b}, [%1], #16 // load two lines of B\n" " ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v8.4s, v0.8h \n" " saddlp v12.4s, v1.8h \n" " smull v0.8h, v5.8b, v2.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " saddlp v9.4s, v0.8h \n" " saddlp v13.4s, v1.8h \n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v16.4s, v0.8h \n" " saddlp v20.4s, v1.8h \n" " smull v0.8h, v5.8b, v2.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " saddlp v17.4s, v0.8h \n" " saddlp v21.4s, v1.8h \n" " cmp w20, #0 \n" " beq 3f \n" " 2: \n" " add x15, %1, #16 \n" " add x14, %0, #32 \n" " ld1 {v4.8b, v5.8b}, [%1], #16 \n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " smull v0.8h, v4.8b, v2.8b \n" " ld1 {v6.8b, v7.8b}, [x15], #16 \n" " smull v1.8h, v5.8b, v2.8b \n" " ld1 {v24.8b, v25.8b}, [x14], #16 \n" " smlal v0.8h, v6.8b, v24.8b \n" " smlal v1.8h, v7.8b, v24.8b \n" " sadalp v8.4s, v0.8h \n" " sadalp v9.4s, v1.8h \n" " smull v0.8h, v4.8b, v3.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " smlal v0.8h, v6.8b, v25.8b \n" " smlal v1.8h, v7.8b, v25.8b \n" " sadalp v12.4s, v0.8h \n" " sadalp v13.4s, v1.8h \n" " // finish v8v9 v12v13, start proc v16v17,v20v21\n" " ld1 {v28.8b, v29.8b}, [%0], #16\n" " smull v0.8h, v4.8b, v28.8b\n" " smull v1.8h, v5.8b, v28.8b\n" " ld1 {v26.8b, v27.8b}, [x14], #16\n" " smlal v0.8h, v6.8b, v26.8b\n" " smlal v1.8h, v7.8b, v26.8b\n" " sadalp v16.4s, v0.8h\n" " sadalp v17.4s, v1.8h\n" " smull v0.8h, v4.8b, v29.8b\n" " smull v1.8h, v5.8b, v29.8b\n" " 
smlal v0.8h, v6.8b, v27.8b\n" " smlal v1.8h, v7.8b, v27.8b\n" " sadalp v20.4s, v0.8h\n" " sadalp v21.4s, v1.8h\n" " add %0, %0, #32 \n" " add %1, %1, #16 \n" " subs w20, w20, #2 \n" " bne 2b \n" " 3: \n" " addp v8.4s, v8.4s, v9.4s \n" " addp v12.4s, v12.4s, v13.4s\n" " addp v16.4s, v16.4s, v17.4s\n" " addp v20.4s, v20.4s, v21.4s\n" " addp v8.4s, v8.4s, v8.4s \n" " addp v12.4s, v12.4s, v12.4s\n" " addp v16.4s, v16.4s, v16.4s\n" " addp v20.4s, v20.4s, v20.4s\n" " // start process kd4 kd2 kd1 cases\n" " 1: \n" " cmp %w8, 0 \n" " beq 4f \n" " // start subkernel_m4n2k4 \n" " ld1 {v4.8b}, [%1], #8 // load B4x2\n" " sxtl v4.8h, v4.8b \n" " mov v6.d[0], v4.d[1] \n" " ld1 {v2.8b}, [%0], #8 // load first A2x4\n" " sxtl v2.8h, v2.8b \n" " mov v3.d[0], v2.d[1] \n" " smull v9.4s, v2.4h, v4.4h \n" " smull v10.4s, v2.4h, v6.4h \n" " addp v9.4s, v9.4s, v10.4s \n" " addp v9.4s, v9.4s, v9.4s \n" " add v8.4s, v8.4s, v9.4s \n" " smull v13.4s, v3.4h, v4.4h \n" " smull v14.4s, v3.4h, v6.4h \n" " addp v13.4s, v13.4s, v14.4s\n" " addp v13.4s, v13.4s, v13.4s\n" " add v12.4s, v12.4s, v13.4s \n" " ld1 {v2.8b}, [%0], #8 // load next A2x4\n" " sxtl v2.8h, v2.8b \n" " mov v3.d[0], v2.d[1] \n" " smull v17.4s, v2.4h, v4.4h \n" " smull v18.4s, v2.4h, v6.4h \n" " addp v17.4s, v17.4s, v18.4s\n" " addp v17.4s, v17.4s, v17.4s\n" " add v16.4s, v16.4s, v17.4s \n" " smull v21.4s, v3.4h, v4.4h \n" " smull v22.4s, v3.4h, v6.4h \n" " addp v21.4s, v21.4s, v22.4s\n" " addp v21.4s, v21.4s, v21.4s\n" " add v20.4s, v20.4s, v21.4s \n" " 4: \n" " cmp %w9, 0 \n" " beq 5f \n" " // start subkernel_m4n2k2 \n" " ld1 {v4.8b}, [%0], #8 //load A4x2\n" " ld1 {v0.8b}, [%1] // load B2x2 \n" " add %1, %1, #4 \n" " // 00 11 22 33 \n" " rev32 v1.4h, v0.4h // 11 00 33 22 \n" " rev64 v2.2s, v0.2s // 22 33 00 11 \n" " rev64 v3.4h, v0.4h // 33 22 11 00 \n" " smull v21.8h, v4.8b, v0.8b \n" " smull v22.8h, v4.8b, v1.8b \n" " smull v23.8h, v4.8b, v2.8b \n" " smull v24.8h, v4.8b, v3.8b \n" " saddlp v21.4s, v21.8h \n" " saddlp v22.4s, 
v22.8h \n" " saddlp v23.4s, v23.8h \n" " saddlp v24.4s, v24.8h \n" " mov v9.s[0], v21.s[0] \n" " mov v9.s[1], v22.s[0] \n" " add v8.4s, v8.4s, v9.4s\n" " mov v13.s[0], v22.s[1] \n" " mov v13.s[1], v21.s[1] \n" " add v12.4s, v12.4s, v13.4s \n" " mov v17.s[0], v23.s[2] \n" " mov v17.s[1], v24.s[2] \n" " add v16.4s, v16.4s, v17.4s \n" " mov v21.s[0], v24.s[3] \n" " mov v21.s[1], v23.s[3] \n" " add v20.4s, v20.4s, v21.4s \n" " 5: \n" " cmp %w10, 0 \n" " beq 6f \n" " // start subkernel_m4n2k1\n" " ld1 {v4.8b}, [%1] // load B1x2\n" " add %1, %1, #2 \n" " ld1 {v2.8b}, [%0] // load A4x1\n" " add %0, %0, #4 \n" " sxtl v4.8h, v4.8b \n" " sxtl v2.8h, v2.8b \n" " smlal v8.4s, v4.4h, v2.h[0] \n" " smlal v12.4s, v4.4h, v2.h[1] \n" " smlal v16.4s, v4.4h, v2.h[2] \n" " smlal v20.4s, v4.4h, v2.h[3] \n" " 6: \n" " cmp %11, #0 \n" " beq 7f \n" " mov v8.d[1], v12.d[0] \n" " mov v16.d[1], v20.d[0] \n" " // v12: 0 1 2 3 \n" " ld1 {v12.4s}, [%11] \n" " zip2 v13.4s, v12.4s, v12.4s \n" " zip1 v12.4s, v12.4s, v12.4s \n" " // v12: 0 0 1 1 \n" " // v13: 2 2 3 3 \n" " // int32 => fp32 \n" " scvtf v8.4s, v8.4s \n" " scvtf v16.4s, v16.4s \n" " // fp32 *= scale_tm \n" " fmul v8.4s, v8.4s, v12.4s \n" " fmul v16.4s, v16.4s, v13.4s\n" " cmp %12, #0 \n" " beq 8f // skip add scales \n" " // fp32 += scales_tm \n" " ld1 {v12.4s}, [%12] \n" " zip2 v13.4s, v12.4s, v12.4s\n" " zip1 v12.4s, v12.4s, v12.4s\n" " fadd v8.4s, v8.4s, v12.4s \n" " fadd v16.4s, v16.4s, v13.4s\n" " 8: \n" " // fp32 -> int32 \n" " fcvtas v8.4s, v8.4s \n" " fcvtas v16.4s, v16.4s \n" " // int32 -> int16 \n" " sqxtn v8.4h, v8.4s \n" " sqxtn v16.4h, v16.4s \n" " // int16 -> int8 \n" " sqxtn v8.8b, v8.8h \n" " sqxtn v16.8b, v16.8h \n" " // save \n" " st1 {v8.h}[0], [%2] \n" " add %2, %2, #2 \n" " st1 {v8.h}[1], [%3] \n" " add %3, %3, #2 \n" " st1 {v16.h}[0], [%4] \n" " add %4, %4, #2 \n" " st1 {v16.h}[1], [%5] \n" " add %5, %5, #2 \n" " b m4_loopnd2_finish \n" " 7: \n" " st1 {v8.2s}, [%2], #8 \n" " st1 {v12.2s}, [%3], #8 \n" " st1 
{v16.2s}, [%4], #8 \n" " st1 {v20.2s}, [%5], #8 \n" " m4_loopnd2_finish: \n" " mov %0, x8 \n" : "=r"(pa), // %0 "=r"(pb), // %1 "=r"(pc0), // %2 "=r"(pc1), // %3 "=r"(pc2), // %4 "=r"(pc3), // %5 "=r"(k8_even),// %6 "=r"(k8), // %7 "=r"(k4), // %8 "=r"(k2), // %9 "=r"(k1), // %10 "=r"(scales), // %11 "=r"(bias) // %12 : "0"(pa), "1"(pb), "2"(pc0), "3"(pc1), "4"(pc2), "5"(pc3), "6"(k8_even), "7"(k8), "8"(k4), "9"(k2), "10"(k1), "11"(scales), "12"(bias) : "cc", "memory", "x8", "w20", "x14", "x15", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } if (n1 > 0) { asm volatile( " eor v8.8b, v8.8b, v8.8b \n" " eor v9.8b, v9.8b, v9.8b \n" " eor v10.8b, v10.8b, v10.8b \n" " eor v11.8b, v11.8b, v11.8b \n" " eor v12.8b, v12.8b, v12.8b \n" " eor v13.8b, v13.8b, v13.8b \n" " eor v14.8b, v14.8b, v14.8b \n" " eor v15.8b, v15.8b, v15.8b \n" " eor v16.8b, v16.8b, v16.8b \n" " eor v17.8b, v17.8b, v17.8b \n" " eor v18.8b, v18.8b, v18.8b \n" " eor v19.8b, v19.8b, v19.8b \n" " eor v20.8b, v20.8b, v20.8b \n" " eor v21.8b, v21.8b, v21.8b \n" " eor v22.8b, v22.8b, v22.8b \n" " eor v23.8b, v23.8b, v23.8b \n" "m4_n1_start: \n" " cmp %w7, #0 \n" " beq 10f \n" " mov w20, %w7 \n" " cmp %w6, #0 \n" " beq 11f// loop number is even \n" " // start loopkd8_nd1 \n" " subs w20, w20, #1 \n" " ld1 {v4.8b}, [%1], #8 // load four lines of B\n" " ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v8.4s, v0.8h \n" " saddlp v12.4s, v1.8h \n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v16.4s, v0.8h \n" " saddlp v20.4s, v1.8h \n" " cmp w20, #0 \n" " beq 12f \n" " 11: \n" " ld1 {v4.8b, v5.8b}, [%1], #16 \n" " ld1 {v24.8b, v25.8b, v26.8b, v27.8b}, [%0], #32\n" " ld1 {v28.8b, v29.8b, v30.8b, v31.8b}, 
[%0], #32\n" " smull v0.8h, v24.8b, v4.8b \n" " smlal v0.8h, v28.8b, v5.8b \n" " sadalp v8.4s, v0.8h \n" " smull v1.8h, v25.8b, v4.8b \n" " smlal v1.8h, v29.8b, v5.8b \n" " sadalp v12.4s, v1.8h \n" " smull v0.8h, v26.8b, v4.8b \n" " smlal v0.8h, v30.8b, v5.8b \n" " sadalp v16.4s, v0.8h \n" " smull v1.8h, v27.8b, v4.8b \n" " smlal v1.8h, v31.8b, v5.8b \n" " sadalp v20.4s, v1.8h \n" " subs w20, w20, #2 \n" " bne 11b \n" " 12: \n" " addp v8.4s, v8.4s, v8.4s \n" " addp v8.4s, v8.4s, v8.4s \n" " addp v12.4s, v12.4s, v12.4s\n" " addp v12.4s, v12.4s, v12.4s\n" " addp v16.4s, v16.4s, v16.4s\n" " addp v16.4s, v16.4s, v16.4s\n" " addp v20.4s, v20.4s, v20.4s\n" " addp v20.4s, v20.4s, v20.4s\n" " // start process kd4 kd2 kd1 cases\n" " 10: \n" " cmp %w8, #0 \n" " beq 13f \n" " // start subkernel_m4n1k2 \n" " ld1 {v4.8b}, [%1] // load B4x1\n" " add %x1, %x1, #4 \n" " sxtl v4.8h, v4.8b // extend B4x1 to v4 \n" " ld1 {v2.8b, v3.8b}, [%0], #16 // load A4x4\n" " sxtl v2.8h, v2.8b \n" " mov v5.d[0], v2.d[1] \n" " sxtl v3.8h, v3.8b \n" " mov v6.d[0], v3.d[1] // extend A4x4 to v2,v5,v3,v6\n" " smull v9.4s, v2.4h, v4.4h \n" " addp v9.4s, v9.4s, v9.4s \n" " addp v9.4s, v9.4s, v9.4s \n" " add v8.4s, v8.4s, v9.4s \n" " smull v13.4s, v5.4h, v4.4h \n" " addp v13.4s, v13.4s, v13.4s\n" " addp v13.4s, v13.4s, v13.4s\n" " add v12.4s, v12.4s, v13.4s \n" " smull v17.4s, v3.4h, v4.4h \n" " addp v17.4s, v17.4s, v17.4s\n" " addp v17.4s, v17.4s, v17.4s\n" " add v16.4s, v16.4s, v17.4s \n" " smull v21.4s, v6.4h, v4.4h \n" " addp v21.4s, v21.4s, v21.4s\n" " addp v21.4s, v21.4s, v21.4s\n" " add v20.4s, v20.4s, v21.4s \n" " 13: \n" " cmp %w9, #0 \n" " beq 14f \n" " // start subkernel_m4n1k2 \n" " ld1 {v4.8b}, [%0], #8 // load A4x2 \n" " ld1 {v0.8b}, [%1] // load B2x1 \n" " add %1, %1, #2 \n" " mov v0.h[1], v0.h[0] \n" " mov v0.s[1], v0.s[0] \n" " smull v0.8h, v0.8b, v4.8b \n" " saddlp v0.4s, v0.8h \n" " mov v9.s[0], v0.s[0] \n" " add v8.4s, v8.4s, v9.4s \n" " mov v13.s[0], v0.s[1] \n" " add v12.4s, 
v12.4s, v13.4s \n" " mov v17.s[0], v0.s[2] \n" " add v16.4s, v16.4s, v17.4s \n" " mov v21.s[0], v0.s[3] \n" " add v20.4s, v20.4s, v21.4s \n" " 14: \n" " cmp %w10, #0 \n" " beq 15f \n" " // start subkernel_m4n1k1 \n" " ld1 {v4.8b}, [%1] // load B1x1\n" " add %1, %1, #1 \n" " ld1 {v2.8b}, [%0] // load A4x1\n" " add %0, %0, #4 \n" " sxtl v4.8h, v4.8b \n" " sxtl v2.8h, v2.8b \n" " smull v0.4s, v2.4h, v4.h[0]\n" " add v8.4s, v8.4s, v0.4s \n" " mov v13.s[0], v0.s[1] \n" " add v12.4s, v12.4s, v13.4s \n" " mov v17.s[0], v0.s[2] \n" " add v16.4s, v16.4s, v17.4s \n" " mov v21.s[0], v0.s[3] \n" " add v20.4s, v20.4s, v21.4s \n" " 15: \n" // REQUANT " cmp %11, #0 \n" " beq 16f \n" " mov v8.s[1], v12.s[0] \n" " mov v8.s[2], v16.s[0] \n" " mov v8.s[3], v20.s[0] \n" " // v12: s0 s1 s2 s3 \n" " ld1 {v12.4s}, [%11] \n" " // int32 => fp32 \n" " scvtf v8.4s, v8.4s \n" " // fp32 *= scale_tm \n" " fmul v8.4s, v8.4s, v12.4s \n" " cmp %12, #0 \n" " beq 17f \n" " // fp32 += bias_tm \n" " ld1 {v12.4s}, [%12] \n" " fadd v8.4s, v8.4s, v12.4s \n" " 17: \n" " // fp32 -> int32 \n" " fcvtas v8.4s, v8.4s \n" " // int32 -> int16 \n" " sqxtn v8.4h, v8.4s \n" " // int16 -> int8 \n" " sqxtn v8.8b, v8.8h \n" " // save \n" " st1 {v8.b}[0], [%2] \n" " st1 {v8.b}[1], [%3] \n" " st1 {v8.b}[2], [%4] \n" " st1 {v8.b}[3], [%5] \n" " b m4_finish \n" " // no need to add the last output pointer\n" " 16: \n" " st1 {v8.s}[0], [%2] \n" " st1 {v12.s}[0], [%3] \n" " st1 {v16.s}[0], [%4] \n" " st1 {v20.s}[0], [%5] \n" " m4_finish: \n" " mov x0, #0 \n" : "=r"(pa), // %0 "=r"(pb), // %1 "=r"(pc0), // %2 "=r"(pc1), // %3 "=r"(pc2), // %4 "=r"(pc3), // %5 "=r"(k8_even),// %6 "=r"(k8), // %7 "=r"(k4), // %8 "=r"(k2), // %9 "=r"(k1), // %10 "=r"(scales), // %11 "=r"(bias) // %12 : "0"(pa), "1"(pb), "2"(pc0), "3"(pc1), "4"(pc2), "5"(pc3), "6"(k8_even), "7"(k8), "8"(k4), "9"(k2), "10"(k1), "11"(scales), "12"(bias) : "cc", "memory", "x0", "x8", "w20", "x14", "x15", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", 
"v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } } #undef DECOMPOSE_K #undef DECOMPOSE_N void int8kernel(void* dst, const int8_t* sa, const int8_t* sb, int m, int k, int n, int ldc, float* scales, float* bias, const Option& opt) { int8_t* pa = (int8_t*)sa; int8_t* pb = (int8_t*)sb; const int nn = (m >> 2) << 2; if (scales == nullptr) { int32_t* pc = (int32_t*)dst; #if PRINT_MATRIX int32_t* origin = pc; #endif #pragma omp parallel for num_threads(opt.num_threads) for (int i = 0; i < nn; i += 4) { int8kernel_m4((void*)(pc + i * ldc), pa + i * k, pb, m, k, n, ldc, nullptr, nullptr); } pa += nn * k; pc += nn * ldc; switch(m-nn) { case 3: int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, nullptr, nullptr); pc += 2 * ldc; pa += 2 * k; int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, nullptr, nullptr); break; case 2: int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, nullptr, nullptr); break; case 1: int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, nullptr, nullptr); break; case 0: default: break; } #if PRINT_MATRIX print_int32_matrix("pc", origin, m, n, ldc); #endif } else { int8_t* pc = (int8_t*)dst; #if PRINT_MATRIX print_fp32_vec("scales", scales, m); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int i = 0; i < nn; i += 4) { int8kernel_m4((void*)(pc + i * ldc), pa + i * k, pb, m, k, n, ldc, scales + i, (bias==nullptr)? nullptr: bias+i); } pa += nn * k; pc += nn * ldc; scales += nn; bias = (bias == nullptr)? nullptr: bias + nn; switch(m-nn) { case 3: int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, scales, bias); pc += 2 * ldc; pa += 2 * k; scales += 2; bias = (bias == nullptr)? 
nullptr: bias + 2; int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, scales, bias); break; case 2: int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, scales, bias); break; case 1: int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, scales, bias); break; case 0: default: break; } } return; } #ifdef PRINT_MATRIX #undef PRINT_MATRIX #endif #endif
gimplify.c
/* * Copyright (C) 2007. QLogic Corporation. All Rights Reserved. */ /* Tree lowering pass. This pass converts the GENERIC functions-as-trees tree representation into the GIMPLE form. Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. Major work done by Sebastian Pop <s.pop@laposte.net>, Diego Novillo <dnovillo@redhat.com> and Jason Merrill <jason@redhat.com>. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "rtl.h" #include "varray.h" #include "tree-gimple.h" #include "tree-inline.h" #include "diagnostic.h" #include "langhooks.h" #include "langhooks-def.h" #include "tree-flow.h" #include "cgraph.h" #include "timevar.h" #include "except.h" #include "hashtab.h" #include "flags.h" #include "real.h" #include "function.h" #include "output.h" #include "expr.h" #include "ggc.h" #include "toplev.h" #include "target.h" #include "optabs.h" #include "pointer-set.h" #ifdef KEY #include "gspin-gcc-interface.h" #endif enum gimplify_omp_var_data { GOVD_SEEN = 1, GOVD_EXPLICIT = 2, GOVD_SHARED = 4, GOVD_PRIVATE = 8, GOVD_FIRSTPRIVATE = 16, GOVD_LASTPRIVATE = 32, GOVD_REDUCTION = 64, GOVD_LOCAL = 128, GOVD_DEBUG_PRIVATE = 256, GOVD_PRIVATE_OUTER_REF = 512, GOVD_DATA_SHARE_CLASS = (GOVD_SHARED | GOVD_PRIVATE | GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE | GOVD_REDUCTION | GOVD_LOCAL) }; enum omp_region_type { ORT_WORKSHARE = 0, ORT_TASK = 1, ORT_PARALLEL = 2, ORT_COMBINED_PARALLEL = 3 }; struct gimplify_omp_ctx { struct gimplify_omp_ctx *outer_context; splay_tree variables; struct pointer_set_t *privatized_types; location_t location; enum omp_clause_default_kind default_kind; enum omp_region_type region_type; }; struct gimplify_ctx { struct gimplify_ctx *prev_context; tree current_bind_expr; tree temps; tree conditional_cleanups; tree exit_label; tree return_temp; VEC(tree,heap) *case_labels; /* The formal temporary table. Should this be persistent? */ htab_t temp_htab; int conditions; bool save_stack; bool into_ssa; }; static struct gimplify_ctx *gimplify_ctxp; static struct gimplify_omp_ctx *gimplify_omp_ctxp; /* Formal (expression) temporary table handling: Multiple occurrences of the same scalar expression are evaluated into the same temporary. */ typedef struct gimple_temp_hash_elt { tree val; /* Key */ tree temp; /* Value */ } elt_t; /* Forward declarations. 
*/ static enum gimplify_status gimplify_compound_expr (tree *, tree *, bool); #ifdef ENABLE_CHECKING static bool cpt_same_type (tree a, tree b); #endif /* Return a hash value for a formal temporary table entry. */ static hashval_t gimple_tree_hash (const void *p) { tree t = ((const elt_t *) p)->val; return iterative_hash_expr (t, 0); } /* Compare two formal temporary table entries. */ static int gimple_tree_eq (const void *p1, const void *p2) { tree t1 = ((const elt_t *) p1)->val; tree t2 = ((const elt_t *) p2)->val; enum tree_code code = TREE_CODE (t1); if (TREE_CODE (t2) != code || TREE_TYPE (t1) != TREE_TYPE (t2)) return 0; if (!operand_equal_p (t1, t2, 0)) return 0; /* Only allow them to compare equal if they also hash equal; otherwise results are nondeterminate, and we fail bootstrap comparison. */ gcc_assert (gimple_tree_hash (p1) == gimple_tree_hash (p2)); return 1; } /* Set up a context for the gimplifier. */ void push_gimplify_context (void) { struct gimplify_ctx *c; c = (struct gimplify_ctx *) xcalloc (1, sizeof (struct gimplify_ctx)); c->prev_context = gimplify_ctxp; if (optimize) c->temp_htab = htab_create (1000, gimple_tree_hash, gimple_tree_eq, free); gimplify_ctxp = c; } /* Tear down a context for the gimplifier. If BODY is non-null, then put the temporaries into the outer BIND_EXPR. Otherwise, put them in the unexpanded_var_list. 
*/ void pop_gimplify_context (tree body) { struct gimplify_ctx *c = gimplify_ctxp; tree t; gcc_assert (c && !c->current_bind_expr); gimplify_ctxp = c->prev_context; for (t = c->temps; t ; t = TREE_CHAIN (t)) DECL_GIMPLE_FORMAL_TEMP_P (t) = 0; if (body) declare_vars (c->temps, body, false); else record_vars (c->temps); if (optimize) htab_delete (c->temp_htab); free (c); } static void gimple_push_bind_expr (tree bind) { TREE_CHAIN (bind) = gimplify_ctxp->current_bind_expr; gimplify_ctxp->current_bind_expr = bind; } static void gimple_pop_bind_expr (void) { gimplify_ctxp->current_bind_expr = TREE_CHAIN (gimplify_ctxp->current_bind_expr); } tree gimple_current_bind_expr (void) { return gimplify_ctxp->current_bind_expr; } /* Returns true iff there is a COND_EXPR between us and the innermost CLEANUP_POINT_EXPR. This info is used by gimple_push_cleanup. */ static bool gimple_conditional_context (void) { return gimplify_ctxp->conditions > 0; } /* Note that we've entered a COND_EXPR. */ static void gimple_push_condition (void) { #ifdef ENABLE_CHECKING if (gimplify_ctxp->conditions == 0) gcc_assert (!gimplify_ctxp->conditional_cleanups); #endif ++(gimplify_ctxp->conditions); } /* Note that we've left a COND_EXPR. If we're back at unconditional scope now, add any conditional cleanups we've seen to the prequeue. */ static void gimple_pop_condition (tree *pre_p) { int conds = --(gimplify_ctxp->conditions); gcc_assert (conds >= 0); if (conds == 0) { append_to_statement_list (gimplify_ctxp->conditional_cleanups, pre_p); gimplify_ctxp->conditional_cleanups = NULL_TREE; } } /* A stable comparison routine for use with splay trees and DECLs. */ static int splay_tree_compare_decl_uid (splay_tree_key xa, splay_tree_key xb) { tree a = (tree) xa; tree b = (tree) xb; return DECL_UID (a) - DECL_UID (b); } /* Create a new omp construct that deals with variable remapping. 
*/ static struct gimplify_omp_ctx * new_omp_context (enum omp_region_type region_type) { struct gimplify_omp_ctx *c; c = XCNEW (struct gimplify_omp_ctx); c->outer_context = gimplify_omp_ctxp; c->variables = splay_tree_new (splay_tree_compare_decl_uid, 0, 0); c->privatized_types = pointer_set_create (); c->location = input_location; c->region_type = region_type; if (region_type != ORT_TASK) c->default_kind = OMP_CLAUSE_DEFAULT_SHARED; else c->default_kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED; return c; } /* Destroy an omp construct that deals with variable remapping. */ static void delete_omp_context (struct gimplify_omp_ctx *c) { splay_tree_delete (c->variables); pointer_set_destroy (c->privatized_types); XDELETE (c); } static void omp_add_variable (struct gimplify_omp_ctx *, tree, unsigned int); static bool omp_notice_variable (struct gimplify_omp_ctx *, tree, bool); /* A subroutine of append_to_statement_list{,_force}. T is not NULL. */ static void append_to_statement_list_1 (tree t, tree *list_p) { tree list = *list_p; tree_stmt_iterator i; if (!list) { if (t && TREE_CODE (t) == STATEMENT_LIST) { *list_p = t; return; } *list_p = list = alloc_stmt_list (); } i = tsi_last (list); tsi_link_after (&i, t, TSI_CONTINUE_LINKING); } /* Add T to the end of the list container pointed to by LIST_P. If T is an expression with no effects, it is ignored. */ void append_to_statement_list (tree t, tree *list_p) { if (t && TREE_SIDE_EFFECTS (t)) append_to_statement_list_1 (t, list_p); } /* Similar, but the statement is always added, regardless of side effects. */ void append_to_statement_list_force (tree t, tree *list_p) { if (t != NULL_TREE) append_to_statement_list_1 (t, list_p); } /* Both gimplify the statement T and append it to LIST_P. */ void gimplify_and_add (tree t, tree *list_p) { gimplify_stmt (&t); append_to_statement_list (t, list_p); } /* Strip off a legitimate source ending from the input string NAME of length LEN. 
   Rather than having to know the names used by all of
   our front ends, we strip off an ending of a period followed by
   up to five characters.  (Java uses ".class".)  */

static inline void
remove_suffix (char *name, int len)
{
  int i;

  /* Look backwards for a '.' within the last 2..7 characters; truncate
     the string there if found.  */
  for (i = 2;  i < 8 && len > i;  i++)
    {
      if (name[len - i] == '.')
	{
	  name[len - i] = '\0';
	  break;
	}
    }
}

/* Create a nameless artificial label and put it in the current function
   context.  Returns the newly created label.  */

tree
create_artificial_label (void)
{
  tree lab = build_decl (LABEL_DECL, NULL_TREE, void_type_node);

  DECL_ARTIFICIAL (lab) = 1;
  DECL_IGNORED_P (lab) = 1;
  DECL_CONTEXT (lab) = current_function_decl;
  return lab;
}

/* Subroutine for find_single_pointer_decl.  */

static tree
find_single_pointer_decl_1 (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
			    void *data)
{
  tree *pdecl = (tree *) data;

  if (DECL_P (*tp) && POINTER_TYPE_P (TREE_TYPE (*tp)))
    {
      if (*pdecl)
	{
	  /* We already found a pointer decl; return anything other
	     than NULL_TREE to unwind from walk_tree signalling that
	     we have a duplicate.  */
	  return *tp;
	}

      *pdecl = *tp;
    }

  return NULL_TREE;
}

/* Find the single DECL of pointer type in the tree T and return it.
   If there are zero or more than one such DECLs, return NULL.  */

static tree
find_single_pointer_decl (tree t)
{
  tree decl = NULL_TREE;

  if (walk_tree (&t, find_single_pointer_decl_1, &decl, NULL))
    {
      /* find_single_pointer_decl_1 returns a nonzero value, causing
	 walk_tree to return a nonzero value, to indicate that it
	 found more than one pointer DECL.  */
      return NULL_TREE;
    }

  return decl;
}

/* Create a new temporary name with PREFIX.  Returns an identifier.  */

static GTY(()) unsigned int tmp_var_id_num;

tree
create_tmp_var_name (const char *prefix)
{
  char *tmp_name;

  if (prefix)
    {
      /* Work on a copy so the caller's string is not modified when the
	 suffix is stripped.  */
      char *preftmp = ASTRDUP (prefix);

      remove_suffix (preftmp, strlen (preftmp));
      prefix = preftmp;
    }

  ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix ? prefix : "T", tmp_var_id_num++);
  return get_identifier (tmp_name);
}

/* Create a new temporary variable declaration of type TYPE.
   Does NOT push it into the current binding.  */

tree
create_tmp_var_raw (tree type, const char *prefix)
{
  tree tmp_var;
  tree new_type;

  /* Make the type of the variable writable.  */
  new_type = build_type_variant (type, 0, 0);
  TYPE_ATTRIBUTES (new_type) = TYPE_ATTRIBUTES (type);

  /* NOTE(review): NEW_TYPE is computed above but the decl below is
     created with TYPE, so the unqualified variant is only used to copy
     attributes — confirm whether build_decl should receive NEW_TYPE.  */
  tmp_var = build_decl (VAR_DECL, prefix ? create_tmp_var_name (prefix) : NULL,
			type);

  /* The variable was declared by the compiler.  */
  DECL_ARTIFICIAL (tmp_var) = 1;
  /* And we don't want debug info for it.  */
  DECL_IGNORED_P (tmp_var) = 1;

  /* Make the variable writable.  */
  TREE_READONLY (tmp_var) = 0;

  DECL_EXTERNAL (tmp_var) = 0;
  TREE_STATIC (tmp_var) = 0;
  TREE_USED (tmp_var) = 1;

  return tmp_var;
}

/* Create a new temporary variable declaration of type TYPE.  DOES push the
   variable into the current binding.  Further, assume that this is called
   only from gimplification or optimization, at which point the creation of
   certain types are bugs.  */

tree
create_tmp_var (tree type, const char *prefix)
{
  tree tmp_var;

  /* We don't allow types that are addressable (meaning we can't make copies),
     or incomplete.  We also used to reject every variable size objects here,
     but now support those for which a constant upper bound can be obtained.
     The processing for variable sizes is performed in gimple_add_tmp_var,
     point at which it really matters and possibly reached via paths not going
     through this function, e.g. after direct calls to create_tmp_var_raw.  */
  gcc_assert (!TREE_ADDRESSABLE (type) && COMPLETE_TYPE_P (type));

  tmp_var = create_tmp_var_raw (type, prefix);
  gimple_add_tmp_var (tmp_var);
  return tmp_var;
}

/* Given a tree, try to return a useful variable name that we can use
   to prefix a temporary that is being assigned the value of the tree.
   I.E. given  <temp> = &A, return A.
*/

const char *
get_name (tree t)
{
  tree stripped_decl;

  stripped_decl = t;
  STRIP_NOPS (stripped_decl);
  if (DECL_P (stripped_decl) && DECL_NAME (stripped_decl))
    return IDENTIFIER_POINTER (DECL_NAME (stripped_decl));
  else
    {
      switch (TREE_CODE (stripped_decl))
	{
	case ADDR_EXPR:
	  /* Recurse through the address-of so "&A" yields "A".  */
	  return get_name (TREE_OPERAND (stripped_decl, 0));
	  break;
	default:
	  return NULL;
	}
    }
}

/* Create a temporary with a name derived from VAL.  Subroutine of
   lookup_tmp_var; nobody else should call this function.  */

static inline tree
create_tmp_from_val (tree val)
{
  return create_tmp_var (TYPE_MAIN_VARIANT (TREE_TYPE (val)), get_name (val));
}

/* Create a temporary to hold the value of VAL.  If IS_FORMAL, try to reuse
   an existing expression temporary.  */

static tree
lookup_tmp_var (tree val, bool is_formal)
{
  tree ret;

  /* If not optimizing, never really reuse a temporary.  local-alloc
     won't allocate any variable that is used in more than one basic
     block, which means it will go into memory, causing much extra
     work in reload and final and poorer code generation, outweighing
     the extra memory allocation here.  */
  if (!optimize || !is_formal || TREE_SIDE_EFFECTS (val))
    ret = create_tmp_from_val (val);
  else
    {
      /* Reuse path: hash VAL in the per-function temp_htab and hand back
	 the previously-created temporary if one exists.  */
      elt_t elt, *elt_p;
      void **slot;

      elt.val = val;
      slot = htab_find_slot (gimplify_ctxp->temp_htab, (void *)&elt, INSERT);
      if (*slot == NULL)
	{
	  elt_p = XNEW (elt_t);
	  elt_p->val = val;
	  elt_p->temp = ret = create_tmp_from_val (val);
	  *slot = (void *) elt_p;
	}
      else
	{
	  elt_p = (elt_t *) *slot;
          ret = elt_p->temp;
	}
    }

  if (is_formal)
    DECL_GIMPLE_FORMAL_TEMP_P (ret) = 1;

  return ret;
}

/* Returns a formal temporary variable initialized with VAL.  PRE_P is as
   in gimplify_expr.  Only use this function if:

   1) The value of the unfactored expression represented by VAL will not
      change between the initialization and use of the temporary, and
   2) The temporary will not be otherwise modified.

   For instance, #1 means that this is inappropriate for SAVE_EXPR temps,
   and #2 means it is inappropriate for && temps.
   For other cases, use get_initialized_tmp_var instead.  */

static tree
internal_get_tmp_var (tree val, tree *pre_p, tree *post_p, bool is_formal)
{
  tree t, mod;

  gimplify_expr (&val, pre_p, post_p, is_gimple_formal_tmp_rhs, fb_rvalue);

  t = lookup_tmp_var (val, is_formal);

  if (is_formal)
    {
      /* Propagate restrict-qualification information from VAL's single
	 pointer decl (if any) onto the temporary.  */
      tree u = find_single_pointer_decl (val);

      if (u && TREE_CODE (u) == VAR_DECL && DECL_BASED_ON_RESTRICT_P (u))
	u = DECL_GET_RESTRICT_BASE (u);
      if (u && TYPE_RESTRICT (TREE_TYPE (u)))
	{
	  if (DECL_BASED_ON_RESTRICT_P (t))
	    gcc_assert (u == DECL_GET_RESTRICT_BASE (t));
	  else
	    {
	      DECL_BASED_ON_RESTRICT_P (t) = 1;
	      SET_DECL_RESTRICT_BASE (t, u);
	    }
	}
    }

  if (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE)
    DECL_COMPLEX_GIMPLE_REG_P (t) = 1;

  mod = build2 (INIT_EXPR, TREE_TYPE (t), t, val);

  if (EXPR_HAS_LOCATION (val))
    SET_EXPR_LOCUS (mod, EXPR_LOCUS (val));
  else
    SET_EXPR_LOCATION (mod, input_location);

  /* gimplify_modify_expr might want to reduce this further.  */
  gimplify_and_add (mod, pre_p);

  /* If we're gimplifying into ssa, gimplify_modify_expr will have
     given our temporary an ssa name.  Find and return it.  */
  if (gimplify_ctxp->into_ssa)
    t = TREE_OPERAND (mod, 0);

  return t;
}

/* Returns a formal temporary variable initialized with VAL.  PRE_P
   points to a statement list where side-effects needed to compute VAL
   should be stored.  */

tree
get_formal_tmp_var (tree val, tree *pre_p)
{
  return internal_get_tmp_var (val, pre_p, NULL, true);
}

/* Returns a temporary variable initialized with VAL.  PRE_P and POST_P
   are as in gimplify_expr.  */

tree
get_initialized_tmp_var (tree val, tree *pre_p, tree *post_p)
{
  return internal_get_tmp_var (val, pre_p, post_p, false);
}

/* Declares all the variables in VARS in SCOPE.  If DEBUG_INFO is
   true, generate debug info for them; otherwise don't.  */

void
declare_vars (tree vars, tree scope, bool debug_info)
{
  tree last = vars;
  if (last)
    {
      tree temps, block;

      /* C99 mode puts the default 'return 0;' for main outside the outer
	 braces.  So drill down until we find an actual scope.  */
      while (TREE_CODE (scope) == COMPOUND_EXPR)
	scope = TREE_OPERAND (scope, 0);

      gcc_assert (TREE_CODE (scope) == BIND_EXPR);

      temps = nreverse (last);

      block = BIND_EXPR_BLOCK (scope);
      if (!block || !debug_info)
	{
	  TREE_CHAIN (last) = BIND_EXPR_VARS (scope);
	  BIND_EXPR_VARS (scope) = temps;
	}
      else
	{
	  /* We need to attach the nodes both to the BIND_EXPR and to its
	     associated BLOCK for debugging purposes.  The key point here
	     is that the BLOCK_VARS of the BIND_EXPR_BLOCK of a BIND_EXPR
	     is a subchain of the BIND_EXPR_VARS of the BIND_EXPR.  */
	  if (BLOCK_VARS (block))
	    BLOCK_VARS (block) = chainon (BLOCK_VARS (block), temps);
	  else
	    {
	      BIND_EXPR_VARS (scope) = chainon (BIND_EXPR_VARS (scope), temps);
	      BLOCK_VARS (block) = temps;
	    }
	}
    }
}

/* For VAR a VAR_DECL of variable size, try to find a constant upper bound
   for the size and adjust DECL_SIZE/DECL_SIZE_UNIT accordingly.  Abort if
   no such upper bound can be obtained.  */

static void
force_constant_size (tree var)
{
  /* The only attempt we make is by querying the maximum size of objects
     of the variable's type.  */
  HOST_WIDE_INT max_size;

  gcc_assert (TREE_CODE (var) == VAR_DECL);

  max_size = max_int_size_in_bytes (TREE_TYPE (var));

  gcc_assert (max_size >= 0);

  DECL_SIZE_UNIT (var)
    = build_int_cst (TREE_TYPE (DECL_SIZE_UNIT (var)), max_size);
  DECL_SIZE (var)
    = build_int_cst (TREE_TYPE (DECL_SIZE (var)), max_size * BITS_PER_UNIT);
}

/* Push the temporary variable TMP into the current binding (or, failing
   a gimplification context, record it with the current function).  */

void
gimple_add_tmp_var (tree tmp)
{
  gcc_assert (!TREE_CHAIN (tmp) && !DECL_SEEN_IN_BIND_EXPR_P (tmp));

  /* Later processing assumes that the object size is constant, which might
     not be true at this point.  Force the use of a constant upper bound in
     this case.  */
  if (!host_integerp (DECL_SIZE_UNIT (tmp), 1))
    force_constant_size (tmp);

  DECL_CONTEXT (tmp) = current_function_decl;
  DECL_SEEN_IN_BIND_EXPR_P (tmp) = 1;

  if (gimplify_ctxp)
    {
      TREE_CHAIN (tmp) = gimplify_ctxp->temps;
      gimplify_ctxp->temps = tmp;

      /* Mark temporaries local within the nearest enclosing parallel.  */
      if (gimplify_omp_ctxp)
	{
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
	  /* Skip worksharing contexts; the variable belongs to the
	     enclosing parallel/task region.  */
	  while (ctx && ctx->region_type == ORT_WORKSHARE)
	    ctx = ctx->outer_context;
	  if (ctx)
	    omp_add_variable (ctx, tmp, GOVD_LOCAL | GOVD_SEEN);
	}
    }
  else if (cfun)
    record_vars (tmp);
  else
    declare_vars (tmp, DECL_SAVED_TREE (current_function_decl), false);
}

/* Determines whether to assign a locus to the statement STMT.  */

static bool
should_carry_locus_p (tree stmt)
{
  /* Don't emit a line note for a label.  We particularly don't want to
     emit one for the break label, since it doesn't actually correspond
     to the beginning of the loop/switch.  */
  if (TREE_CODE (stmt) == LABEL_EXPR)
    return false;

  /* Do not annotate empty statements, since it confuses gcov.  */
  if (!TREE_SIDE_EFFECTS (stmt))
    return false;

  return true;
}

/* Set LOCUS on T unless T already carries a location or should not
   carry one at all.  */

static void
annotate_one_with_locus (tree t, location_t locus)
{
  if (EXPR_P (t) && ! EXPR_HAS_LOCATION (t) && should_carry_locus_p (t))
    SET_EXPR_LOCATION (t, locus);
}

/* Annotate every statement in the list *STMT_P with LOCUS where
   appropriate.  */

void
annotate_all_with_locus (tree *stmt_p, location_t locus)
{
  tree_stmt_iterator i;

  if (!*stmt_p)
    return;

  for (i = tsi_start (*stmt_p); !tsi_end_p (i); tsi_next (&i))
    {
      tree t = tsi_stmt (i);

      /* Assuming we've already been gimplified, we shouldn't
	  see nested chaining constructs anymore.  */
      gcc_assert (TREE_CODE (t) != STATEMENT_LIST
		  && TREE_CODE (t) != COMPOUND_EXPR);

      annotate_one_with_locus (t, locus);
    }
}

/* Similar to copy_tree_r() but do not copy SAVE_EXPR or TARGET_EXPR nodes.
   These nodes model computations that should only be done once.  If we
   were to unshare something like SAVE_EXPR(i++), the gimplification
   process would create wrong code.  */

static tree
mostly_copy_tree_r (tree *tp, int *walk_subtrees, void *data)
{
  enum tree_code code = TREE_CODE (*tp);
  /* Don't unshare types, decls, constants and SAVE_EXPR nodes.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant
      || code == SAVE_EXPR || code == TARGET_EXPR
      /* We can't do anything sensible with a BLOCK used as an expression,
	 but we also can't just die when we see it because of non-expression
	 uses.  So just avert our eyes and cross our fingers.  Silly Java.  */
      || code == BLOCK)
    *walk_subtrees = 0;
  else
    {
      gcc_assert (code != BIND_EXPR);
      copy_tree_r (tp, walk_subtrees, data);
    }

  return NULL_TREE;
}

/* Callback for walk_tree to unshare most of the shared trees rooted at
   *TP.  If *TP has been visited already (i.e., TREE_VISITED (*TP) == 1),
   then *TP is deep copied by calling copy_tree_r.

   This unshares the same trees as copy_tree_r with the exception of
   SAVE_EXPR nodes.  These nodes model computations that should only be
   done once.  If we were to unshare something like SAVE_EXPR(i++), the
   gimplification process would create wrong code.  */

static tree
copy_if_shared_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
		  void *data ATTRIBUTE_UNUSED)
{
  tree t = *tp;
  enum tree_code code = TREE_CODE (t);

  /* Skip types, decls, and constants.  But we do want to look at their
     types and the bounds of types.  Mark them as visited so we properly
     unmark their subtrees on the unmark pass.  If we've already seen them,
     don't look down further.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant)
    {
      if (TREE_VISITED (t))
	*walk_subtrees = 0;
      else
	TREE_VISITED (t) = 1;
    }

  /* If this node has been visited already, unshare it and don't look
     any deeper.  */
  else if (TREE_VISITED (t))
    {
      walk_tree (tp, mostly_copy_tree_r, NULL, NULL);
      *walk_subtrees = 0;
    }

  /* Otherwise, mark the tree as visited and keep looking.  */
  else
    TREE_VISITED (t) = 1;

  return NULL_TREE;
}

/* Callback for walk_tree: clear the TREE_VISITED flag set by the
   copy_if_shared_r pass; stop descending once an unvisited node is
   reached.  */

static tree
unmark_visited_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
		  void *data ATTRIBUTE_UNUSED)
{
  if (TREE_VISITED (*tp))
    TREE_VISITED (*tp) = 0;
  else
    *walk_subtrees = 0;

  return NULL_TREE;
}

/* Unshare all the trees in BODY_P, a pointer into the body of FNDECL, and the
   bodies of any nested functions if we are unsharing the entire body of
   FNDECL.  */

static void
unshare_body (tree *body_p, tree fndecl)
{
  struct cgraph_node *cgn = cgraph_node (fndecl);

  walk_tree (body_p, copy_if_shared_r, NULL, NULL);
  if (body_p == &DECL_SAVED_TREE (fndecl))
    for (cgn = cgn->nested; cgn; cgn = cgn->next_nested)
      unshare_body (&DECL_SAVED_TREE (cgn->decl), cgn->decl);
}

/* Likewise, but mark all trees as not visited.  */

static void
unvisit_body (tree *body_p, tree fndecl)
{
  struct cgraph_node *cgn = cgraph_node (fndecl);

  walk_tree (body_p, unmark_visited_r, NULL, NULL);
  if (body_p == &DECL_SAVED_TREE (fndecl))
    for (cgn = cgn->nested; cgn; cgn = cgn->next_nested)
      unvisit_body (&DECL_SAVED_TREE (cgn->decl), cgn->decl);
}

/* Unshare T and all the trees reached from T via TREE_CHAIN.  */

static void
unshare_all_trees (tree t)
{
  walk_tree (&t, copy_if_shared_r, NULL, NULL);
  walk_tree (&t, unmark_visited_r, NULL, NULL);
}

/* Unconditionally make an unshared copy of EXPR.  This is used when using
   stored expressions which span multiple functions, such as BINFO_VTABLE,
   as the normal unsharing process can't tell that they're shared.  */

tree
unshare_expr (tree expr)
{
  walk_tree (&expr, mostly_copy_tree_r, NULL, NULL);
  return expr;
}

/* A terser interface for building a representation of an exception
   specification.  */

tree
gimple_build_eh_filter (tree body, tree allowed, tree failure)
{
  tree t;

  /* FIXME should the allowed types go in TREE_TYPE?  */
  t = build2 (EH_FILTER_EXPR, void_type_node, allowed, NULL_TREE);
  append_to_statement_list (failure, &EH_FILTER_FAILURE (t));

  t = build2 (TRY_CATCH_EXPR, void_type_node, NULL_TREE, t);
  append_to_statement_list (body, &TREE_OPERAND (t, 0));

  return t;
}

/* WRAPPER is a code such as BIND_EXPR or CLEANUP_POINT_EXPR which can both
   contain statements and have a value.  Assign its value to a temporary
   and give it void_type_node.  Returns the temporary, or NULL_TREE if
   WRAPPER was already void.  */

tree
voidify_wrapper_expr (tree wrapper, tree temp)
{
  tree type = TREE_TYPE (wrapper);
  if (type && !VOID_TYPE_P (type))
    {
      tree *p;

      /* Set p to point to the body of the wrapper.  Loop until we find
	 something that isn't a wrapper.  */
      for (p = &wrapper; p && *p; )
	{
	  switch (TREE_CODE (*p))
	    {
	    case BIND_EXPR:
	      TREE_SIDE_EFFECTS (*p) = 1;
	      TREE_TYPE (*p) = void_type_node;
	      /* For a BIND_EXPR, the body is operand 1.  */
	      p = &BIND_EXPR_BODY (*p);
	      break;

	    case CLEANUP_POINT_EXPR:
	    case TRY_FINALLY_EXPR:
	    case TRY_CATCH_EXPR:
	      TREE_SIDE_EFFECTS (*p) = 1;
	      TREE_TYPE (*p) = void_type_node;
	      p = &TREE_OPERAND (*p, 0);
	      break;

	    case STATEMENT_LIST:
	      {
		tree_stmt_iterator i = tsi_last (*p);
		TREE_SIDE_EFFECTS (*p) = 1;
		TREE_TYPE (*p) = void_type_node;
		/* The value lives in the last statement of the list.  */
		p = tsi_end_p (i) ? NULL : tsi_stmt_ptr (i);
	      }
	      break;

	    case COMPOUND_EXPR:
	      /* Advance to the last statement.  Set all container types to
		 void.  */
	      for (; TREE_CODE (*p) == COMPOUND_EXPR;
		   p = &TREE_OPERAND (*p, 1))
		{
		  TREE_SIDE_EFFECTS (*p) = 1;
		  TREE_TYPE (*p) = void_type_node;
		}
	      break;

	    default:
	      goto out;
	    }
	}

    out:
      if (p == NULL || IS_EMPTY_STMT (*p))
	temp = NULL_TREE;
      else if (temp)
	{
	  /* The wrapper is on the RHS of an assignment that we're pushing
	     down.  */
	  gcc_assert (TREE_CODE (temp) == INIT_EXPR
		      || TREE_CODE (temp) == MODIFY_EXPR);
	  TREE_OPERAND (temp, 1) = *p;
	  *p = temp;
	}
      else
	{
	  temp = create_tmp_var (type, "retval");
	  *p = build2 (INIT_EXPR, type, temp, *p);
	}

      return temp;
    }

  return NULL_TREE;
}

/* Prepare calls to builtins to SAVE and RESTORE the stack as well as
   a temporary through which they communicate.  */

static void
build_stack_save_restore (tree *save, tree *restore)
{
  tree save_call, tmp_var;

  save_call
    = build_function_call_expr (implicit_built_in_decls[BUILT_IN_STACK_SAVE],
				NULL_TREE);
  tmp_var = create_tmp_var (ptr_type_node, "saved_stack");

  *save = build2 (MODIFY_EXPR, ptr_type_node, tmp_var, save_call);
  *restore
    = build_function_call_expr (implicit_built_in_decls[BUILT_IN_STACK_RESTORE],
				tree_cons (NULL_TREE, tmp_var, NULL_TREE));
}

/* Gimplify a BIND_EXPR.  Just voidify and recurse.  */

static enum gimplify_status
gimplify_bind_expr (tree *expr_p, tree *pre_p)
{
  tree bind_expr = *expr_p;
  bool old_save_stack = gimplify_ctxp->save_stack;
  tree t;

  tree temp = voidify_wrapper_expr (bind_expr, NULL);

  /* Mark variables seen in this bind expr.  */
  for (t = BIND_EXPR_VARS (bind_expr); t ; t = TREE_CHAIN (t))
    {
      if (TREE_CODE (t) == VAR_DECL)
	{
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;

	  /* Mark variable as local.  */
	  if (ctx && !is_global_var (t)
	      && (! DECL_SEEN_IN_BIND_EXPR_P (t)
		  || splay_tree_lookup (ctx->variables,
					(splay_tree_key) t) == NULL))
	    omp_add_variable (gimplify_omp_ctxp, t, GOVD_LOCAL | GOVD_SEEN);

	  DECL_SEEN_IN_BIND_EXPR_P (t) = 1;
	}

      /* Preliminarily mark non-addressed complex variables as eligible
	 for promotion to gimple registers.  We'll transform their uses
	 as we find them.  */
      if (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE
	  && !TREE_THIS_VOLATILE (t)
	  && (TREE_CODE (t) == VAR_DECL && !DECL_HARD_REGISTER (t))
	  && !needs_to_live_in_memory (t))
	DECL_COMPLEX_GIMPLE_REG_P (t) = 1;
    }

  gimple_push_bind_expr (bind_expr);
  gimplify_ctxp->save_stack = false;

  gimplify_to_stmt_list (&BIND_EXPR_BODY (bind_expr));

  if (gimplify_ctxp->save_stack)
    {
      tree stack_save, stack_restore;

      /* Save stack on entry and restore it on exit.  Add a try_finally
	 block to achieve this.  Note that mudflap depends on the
	 format of the emitted code: see mx_register_decls().  */
      build_stack_save_restore (&stack_save, &stack_restore);

      t = build2 (TRY_FINALLY_EXPR, void_type_node,
		  BIND_EXPR_BODY (bind_expr), NULL_TREE);
      append_to_statement_list (stack_restore, &TREE_OPERAND (t, 1));

      BIND_EXPR_BODY (bind_expr) = NULL_TREE;
      append_to_statement_list (stack_save, &BIND_EXPR_BODY (bind_expr));
      append_to_statement_list (t, &BIND_EXPR_BODY (bind_expr));
    }

  gimplify_ctxp->save_stack = old_save_stack;
  gimple_pop_bind_expr ();

  if (temp)
    {
      *expr_p = temp;
      append_to_statement_list (bind_expr, pre_p);
      return GS_OK;
    }
  else
    return GS_ALL_DONE;
}

/* Gimplify a RETURN_EXPR.  If the expression to be returned is not a
   GIMPLE value, it is assigned to a new temporary and the statement is
   re-written to return the temporary.

   PRE_P points to the list where side effects that must happen before
   STMT should be stored.  */

static enum gimplify_status
gimplify_return_expr (tree stmt, tree *pre_p)
{
  tree ret_expr = TREE_OPERAND (stmt, 0);
  tree result_decl, result;

  if (!ret_expr || TREE_CODE (ret_expr) == RESULT_DECL
      || ret_expr == error_mark_node)
    return GS_ALL_DONE;

  if (VOID_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))))
    result_decl = NULL_TREE;
  else
    {
      result_decl = TREE_OPERAND (ret_expr, 0);
      if (TREE_CODE (result_decl) == INDIRECT_REF)
	/* See through a return by reference.  */
	result_decl = TREE_OPERAND (result_decl, 0);

      gcc_assert ((TREE_CODE (ret_expr) == MODIFY_EXPR
		   || TREE_CODE (ret_expr) == INIT_EXPR)
		  && TREE_CODE (result_decl) == RESULT_DECL);
    }

  /* If aggregate_value_p is true, then we can return the bare RESULT_DECL.
     Recall that aggregate_value_p is FALSE for any aggregate type that is
     returned in registers.  If we're returning values in registers, then
     we don't want to extend the lifetime of the RESULT_DECL, particularly
     across another call.  In addition, for those aggregates for which
     hard_function_value generates a PARALLEL, we'll die during normal
     expansion of structure assignments; there's special code in expand_return
     to handle this case that does not exist in expand_expr.  */
  if (!result_decl
      || aggregate_value_p (result_decl, TREE_TYPE (current_function_decl)))
    result = result_decl;
  else if (gimplify_ctxp->return_temp)
    result = gimplify_ctxp->return_temp;
  else
    {
      result = create_tmp_var (TREE_TYPE (result_decl), NULL);

      /* ??? With complex control flow (usually involving abnormal edges),
	 we can wind up warning about an uninitialized value for this.  Due
	 to how this variable is constructed and initialized, this is never
	 true.  Give up and never warn.  */
      TREE_NO_WARNING (result) = 1;

      gimplify_ctxp->return_temp = result;
    }

  /* Smash the lhs of the MODIFY_EXPR to the temporary we plan to use.
     Then gimplify the whole thing.  */
  if (result != result_decl)
    TREE_OPERAND (ret_expr, 0) = result;

  gimplify_and_add (TREE_OPERAND (stmt, 0), pre_p);

  /* If we didn't use a temporary, then the result is just the result_decl.
     Otherwise we need a simple copy.  This should already be gimple.  */
  if (result == result_decl)
    ret_expr = result;
  else
    ret_expr = build2 (MODIFY_EXPR, TREE_TYPE (result), result_decl, result);
  TREE_OPERAND (stmt, 0) = ret_expr;

  return GS_ALL_DONE;
}

/* Gimplifies a DECL_EXPR node *STMT_P by making any necessary allocation
   and initialization explicit.
*/

static enum gimplify_status
gimplify_decl_expr (tree *stmt_p)
{
  tree stmt = *stmt_p;
  tree decl = DECL_EXPR_DECL (stmt);

  *stmt_p = NULL_TREE;

  if (TREE_TYPE (decl) == error_mark_node)
    return GS_ERROR;

  if ((TREE_CODE (decl) == TYPE_DECL
       || TREE_CODE (decl) == VAR_DECL)
      && !TYPE_SIZES_GIMPLIFIED (TREE_TYPE (decl)))
    gimplify_type_sizes (TREE_TYPE (decl), stmt_p);

  if (TREE_CODE (decl) == VAR_DECL && !DECL_EXTERNAL (decl))
    {
      tree init = DECL_INITIAL (decl);

      if (TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
	{
	  /* This is a variable-sized decl.  Simplify its size and mark it
	     for deferred expansion.  Note that mudflap depends on the format
	     of the emitted code: see mx_register_decls().  */
	  tree t, args, addr, ptr_type;

	  gimplify_one_sizepos (&DECL_SIZE (decl), stmt_p);
	  gimplify_one_sizepos (&DECL_SIZE_UNIT (decl), stmt_p);

	  /* All occurrences of this decl in final gimplified code will be
	     replaced by indirection.  Setting DECL_VALUE_EXPR does two
	     things: First, it lets the rest of the gimplifier know what
	     replacement to use.  Second, it lets the debug info know
	     where to find the value.  */
	  ptr_type = build_pointer_type (TREE_TYPE (decl));
	  addr = create_tmp_var (ptr_type, get_name (decl));
	  DECL_IGNORED_P (addr) = 0;
	  t = build_fold_indirect_ref (addr);
	  SET_DECL_VALUE_EXPR (decl, t);
	  DECL_HAS_VALUE_EXPR_P (decl) = 1;

	  /* Allocate the backing storage with alloca.  */
	  args = tree_cons (NULL, DECL_SIZE_UNIT (decl), NULL);
	  t = built_in_decls[BUILT_IN_ALLOCA];
	  t = build_function_call_expr (t, args);
	  t = fold_convert (ptr_type, t);
	  t = build2 (MODIFY_EXPR, void_type_node, addr, t);

	  gimplify_and_add (t, stmt_p);

	  /* Indicate that we need to restore the stack level when the
	     enclosing BIND_EXPR is exited.  */
	  gimplify_ctxp->save_stack = true;
	}

      if (init && init != error_mark_node)
	{
	  if (!TREE_STATIC (decl))
	    {
	      DECL_INITIAL (decl) = NULL_TREE;
	      init = build2 (INIT_EXPR, void_type_node, decl, init);
	      gimplify_and_add (init, stmt_p);
	    }
	  else
	    /* We must still examine initializers for static variables
	       as they may contain a label address.  */
	    walk_tree (&init, force_labels_r, NULL, NULL);
	}

      /* Some front ends do not explicitly declare all anonymous
	 artificial variables.  We compensate here by declaring the
	 variables, though it would be better if the front ends would
	 explicitly declare them.  */
      if (!DECL_SEEN_IN_BIND_EXPR_P (decl)
	  && DECL_ARTIFICIAL (decl) && DECL_NAME (decl) == NULL_TREE)
	gimple_add_tmp_var (decl);
    }

  return GS_ALL_DONE;
}

/* Gimplify a LOOP_EXPR.  Normally this just involves gimplifying the body
   and replacing the LOOP_EXPR with goto, but if the loop contains an
   EXIT_EXPR, we need to append a label for it to jump to.  */

static enum gimplify_status
gimplify_loop_expr (tree *expr_p, tree *pre_p)
{
  tree saved_label = gimplify_ctxp->exit_label;
  tree start_label = build1 (LABEL_EXPR, void_type_node, NULL_TREE);
  tree jump_stmt = build_and_jump (&LABEL_EXPR_LABEL (start_label));

  append_to_statement_list (start_label, pre_p);

  gimplify_ctxp->exit_label = NULL_TREE;

  gimplify_and_add (LOOP_EXPR_BODY (*expr_p), pre_p);

  if (gimplify_ctxp->exit_label)
    {
      /* The body used EXIT_EXPR; emit the back-edge jump followed by the
	 exit label it targets.  */
      append_to_statement_list (jump_stmt, pre_p);
      *expr_p = build1 (LABEL_EXPR, void_type_node, gimplify_ctxp->exit_label);
    }
  else
    *expr_p = jump_stmt;

  gimplify_ctxp->exit_label = saved_label;

  return GS_ALL_DONE;
}

/* Compare two case labels.  Because the front end should already have
   made sure that case ranges do not overlap, it is enough to only compare
   the CASE_LOW values of each case label.  */

static int
compare_case_labels (const void *p1, const void *p2)
{
  tree case1 = *(tree *)p1;
  tree case2 = *(tree *)p2;

  return tree_int_cst_compare (CASE_LOW (case1), CASE_LOW (case2));
}

/* Sort the case labels in LABEL_VEC in place in ascending order.  */

void
sort_case_labels (tree label_vec)
{
  size_t len = TREE_VEC_LENGTH (label_vec);
  tree default_case = TREE_VEC_ELT (label_vec, len - 1);

  if (CASE_LOW (default_case))
    {
      size_t i;

      /* The last label in the vector should be the default case
         but it is not.  */
      for (i = 0; i < len; ++i)
	{
	  tree t = TREE_VEC_ELT (label_vec, i);

	  if (!CASE_LOW (t))
	    {
	      /* Swap the found default case into the last slot.  */
	      default_case = t;
	      TREE_VEC_ELT (label_vec, i) = TREE_VEC_ELT (label_vec, len - 1);
	      TREE_VEC_ELT (label_vec, len - 1) = default_case;
	      break;
	    }
	}
    }

  /* Sort everything except the trailing default case.  */
  qsort (&TREE_VEC_ELT (label_vec, 0), len - 1, sizeof (tree),
	 compare_case_labels);
}

/* Gimplify a SWITCH_EXPR, and collect a TREE_VEC of the labels it can
   branch to.  */

static enum gimplify_status
gimplify_switch_expr (tree *expr_p, tree *pre_p)
{
  tree switch_expr = *expr_p;
  enum gimplify_status ret;

  ret = gimplify_expr (&SWITCH_COND (switch_expr), pre_p, NULL,
		       is_gimple_val, fb_rvalue);

  if (SWITCH_BODY (switch_expr))
    {
      VEC(tree,heap) *labels, *saved_labels;
      tree label_vec, default_case = NULL_TREE;
      size_t i, len;

      /* If someone can be bothered to fill in the labels, they can be
	 bothered to null out the body too.  */
      gcc_assert (!SWITCH_LABELS (switch_expr));

      /* Collect the case labels seen while gimplifying the body.  */
      saved_labels = gimplify_ctxp->case_labels;
      gimplify_ctxp->case_labels = VEC_alloc (tree, heap, 8);

      gimplify_to_stmt_list (&SWITCH_BODY (switch_expr));

      labels = gimplify_ctxp->case_labels;
      gimplify_ctxp->case_labels = saved_labels;

      i = 0;
      while (i < VEC_length (tree, labels))
	{
	  tree elt = VEC_index (tree, labels, i);
	  tree low = CASE_LOW (elt);
	  bool remove_element = FALSE;

	  if (low)
	    {
	      /* Discard empty ranges.  */
	      tree high = CASE_HIGH (elt);
	      if (high && INT_CST_LT (high, low))
		remove_element = TRUE;
	    }
	  else
	    {
	      /* The default case must be the last label in the list.  */
	      gcc_assert (!default_case);
	      default_case = elt;
	      remove_element = TRUE;
	    }

	  if (remove_element)
	    VEC_ordered_remove (tree, labels, i);
	  else
	    i++;
	}
      len = i;

      label_vec = make_tree_vec (len + 1);
      SWITCH_LABELS (*expr_p) = label_vec;
      append_to_statement_list (switch_expr, pre_p);

      if (! default_case)
	{
	  /* If the switch has no default label, add one, so that we jump
	     around the switch body.  */
	  default_case = build3 (CASE_LABEL_EXPR, void_type_node, NULL_TREE,
				 NULL_TREE, create_artificial_label ());
	  append_to_statement_list (SWITCH_BODY (switch_expr), pre_p);
	  *expr_p = build1 (LABEL_EXPR, void_type_node,
			    CASE_LABEL (default_case));
	}
      else
	*expr_p = SWITCH_BODY (switch_expr);

      for (i = 0; i < len; ++i)
	TREE_VEC_ELT (label_vec, i) = VEC_index (tree, labels, i);
      TREE_VEC_ELT (label_vec, len) = default_case;

      VEC_free (tree, heap, labels);

      sort_case_labels (label_vec);

      SWITCH_BODY (switch_expr) = NULL;
    }
  else
    gcc_assert (SWITCH_LABELS (switch_expr));

  return ret;
}

/* Gimplify a CASE_LABEL_EXPR: record it with the innermost context that
   collects case labels and replace it with a plain LABEL_EXPR.  */

static enum gimplify_status
gimplify_case_label_expr (tree *expr_p)
{
  tree expr = *expr_p;
  struct gimplify_ctx *ctxp;

  /* Invalid OpenMP programs can play Duff's Device type games with
     #pragma omp parallel.  At least in the C front end, we don't
     detect such invalid branches until after gimplification.  */
  for (ctxp = gimplify_ctxp; ; ctxp = ctxp->prev_context)
    if (ctxp->case_labels)
      break;

  VEC_safe_push (tree, heap, ctxp->case_labels, expr);
  *expr_p = build1 (LABEL_EXPR, void_type_node, CASE_LABEL (expr));
  return GS_ALL_DONE;
}

/* Build a GOTO to the LABEL_DECL pointed to by LABEL_P, building it first
   if necessary.  */

tree
build_and_jump (tree *label_p)
{
  if (label_p == NULL)
    /* If there's nowhere to jump, just fall through.  */
    return NULL_TREE;

  if (*label_p == NULL_TREE)
    {
      tree label = create_artificial_label ();
      *label_p = label;
    }

  return build1 (GOTO_EXPR, void_type_node, *label_p);
}

/* Gimplify an EXIT_EXPR by converting to a GOTO_EXPR inside a COND_EXPR.
   This also involves building a label to jump to and communicating it to
   gimplify_loop_expr through gimplify_ctxp->exit_label.  */

static enum gimplify_status
gimplify_exit_expr (tree *expr_p)
{
  tree cond = TREE_OPERAND (*expr_p, 0);
  tree expr;

  expr = build_and_jump (&gimplify_ctxp->exit_label);
  expr = build3 (COND_EXPR, void_type_node, cond, expr, NULL_TREE);
  *expr_p = expr;

  return GS_OK;
}

/* A helper function to be called via walk_tree.
   Mark all labels under *TP as being forced.  To be called for
   DECL_INITIAL of static variables.  */

tree
force_labels_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  if (TREE_CODE (*tp) == LABEL_DECL)
    FORCED_LABEL (*tp) = 1;

  return NULL_TREE;
}

/* *EXPR_P is a COMPONENT_REF being used as an rvalue.  If its type is
   different from its canonical type, wrap the whole thing inside a
   NOP_EXPR and force the type of the COMPONENT_REF to be the canonical
   type.

   The canonical type of a COMPONENT_REF is the type of the field being
   referenced--unless the field is a bit-field which can be read directly
   in a smaller mode, in which case the canonical type is the
   sign-appropriate type corresponding to that mode.  */

static void
canonicalize_component_ref (tree *expr_p)
{
  tree expr = *expr_p;
  tree type;

  gcc_assert (TREE_CODE (expr) == COMPONENT_REF);

  if (INTEGRAL_TYPE_P (TREE_TYPE (expr)))
    type = TREE_TYPE (get_unwidened (expr, NULL_TREE));
  else
    type = TREE_TYPE (TREE_OPERAND (expr, 1));

  if (TREE_TYPE (expr) != type)
    {
      tree old_type = TREE_TYPE (expr);

      /* Set the type of the COMPONENT_REF to the underlying type.  */
      TREE_TYPE (expr) = type;

      /* And wrap the whole thing inside a NOP_EXPR.  */
      expr = build1 (NOP_EXPR, old_type, expr);

      *expr_p = expr;
    }
}

/* If a NOP conversion is changing a pointer to array of foo to a pointer
   to foo, embed that change in the ADDR_EXPR by converting
      T array[U];
      (T *)&array
   ==>
      &array[L]
   where L is the lower bound.  For simplicity, only do this for constant
   lower bound.  */

static void
canonicalize_addr_expr (tree *expr_p)
{
  tree expr = *expr_p;
  tree ctype = TREE_TYPE (expr);
  tree addr_expr = TREE_OPERAND (expr, 0);
  tree atype = TREE_TYPE (addr_expr);
  tree dctype, datype, ddatype, otype, obj_expr;

  /* Both cast and addr_expr types should be pointers.  */
  if (!POINTER_TYPE_P (ctype) || !POINTER_TYPE_P (atype))
    return;

  /* The addr_expr type should be a pointer to an array.  */
  datype = TREE_TYPE (atype);
  if (TREE_CODE (datype) != ARRAY_TYPE)
    return;

  /* Both cast and addr_expr types should address the same object type.  */
  dctype = TREE_TYPE (ctype);
  ddatype = TREE_TYPE (datype);
  if (!lang_hooks.types_compatible_p (ddatype, dctype))
    return;

  /* The addr_expr and the object type should match.  */
  obj_expr = TREE_OPERAND (addr_expr, 0);
  otype = TREE_TYPE (obj_expr);
  if (!lang_hooks.types_compatible_p (otype, datype))
    return;

  /* The lower bound and element sizes must be constant.  */
  if (!TYPE_SIZE_UNIT (dctype)
      || TREE_CODE (TYPE_SIZE_UNIT (dctype)) != INTEGER_CST
      || !TYPE_DOMAIN (datype) || !TYPE_MIN_VALUE (TYPE_DOMAIN (datype))
      || TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (datype))) != INTEGER_CST)
    return;

  /* All checks succeeded.  Build a new node to merge the cast.  */
  *expr_p = build4 (ARRAY_REF, dctype, obj_expr,
		    TYPE_MIN_VALUE (TYPE_DOMAIN (datype)),
		    TYPE_MIN_VALUE (TYPE_DOMAIN (datype)),
		    size_binop (EXACT_DIV_EXPR, TYPE_SIZE_UNIT (dctype),
				size_int (TYPE_ALIGN_UNIT (dctype))));
  *expr_p = build1 (ADDR_EXPR, ctype, *expr_p);
}

/* *EXPR_P is a NOP_EXPR or CONVERT_EXPR.  Remove it and/or other conversions
   underneath as appropriate.  */

static enum gimplify_status
gimplify_conversion (tree *expr_p)
{
  gcc_assert (TREE_CODE (*expr_p) == NOP_EXPR
	      || TREE_CODE (*expr_p) == CONVERT_EXPR);

  /* Then strip away all but the outermost conversion.  */
  STRIP_SIGN_NOPS (TREE_OPERAND (*expr_p, 0));

  /* And remove the outermost conversion if it's useless.  */
  if (tree_ssa_useless_type_conversion (*expr_p))
    *expr_p = TREE_OPERAND (*expr_p, 0);

  /* If we still have a conversion at the toplevel,
     then canonicalize some constructs.  */
  if (TREE_CODE (*expr_p) == NOP_EXPR || TREE_CODE (*expr_p) == CONVERT_EXPR)
    {
      tree sub = TREE_OPERAND (*expr_p, 0);

      /* If a NOP conversion is changing the type of a COMPONENT_REF
	 expression, then canonicalize its type now in order to expose more
	 redundant conversions.  */
      if (TREE_CODE (sub) == COMPONENT_REF)
	canonicalize_component_ref (&TREE_OPERAND (*expr_p, 0));

      /* If a NOP conversion is changing a pointer to array of foo
	 to a pointer to foo, embed that change in the ADDR_EXPR.  */
      else if (TREE_CODE (sub) == ADDR_EXPR)
	canonicalize_addr_expr (expr_p);
    }

  return GS_OK;
}

/* Gimplify a VAR_DECL or PARM_DECL.  Returns GS_OK if we expanded a
   DECL_VALUE_EXPR, and it's worth re-examining things.  */

static enum gimplify_status
gimplify_var_or_parm_decl (tree *expr_p)
{
  tree decl = *expr_p;

  /* ??? If this is a local variable, and it has not been seen in any
     outer BIND_EXPR, then it's probably the result of a duplicate
     declaration, for which we've already issued an error.  It would
     be really nice if the front end wouldn't leak these at all.
     Currently the only known culprit is C++ destructors, as seen
     in g++.old-deja/g++.jason/binding.C.  */
  if (TREE_CODE (decl) == VAR_DECL
      && !DECL_SEEN_IN_BIND_EXPR_P (decl)
      && !TREE_STATIC (decl) && !DECL_EXTERNAL (decl)
      && decl_function_context (decl) == current_function_decl)
    {
      gcc_assert (errorcount || sorrycount);
      return GS_ERROR;
    }

  /* When within an OpenMP context, notice uses of variables.  */
  if (gimplify_omp_ctxp && omp_notice_variable (gimplify_omp_ctxp, decl, true))
    return GS_ALL_DONE;

  /* If the decl is an alias for another expression, substitute it now.  */
  if (DECL_HAS_VALUE_EXPR_P (decl))
    {
      *expr_p = unshare_expr (DECL_VALUE_EXPR (decl));
      return GS_OK;
    }

  return GS_ALL_DONE;
}

/* Gimplify the COMPONENT_REF, ARRAY_REF, REALPART_EXPR or IMAGPART_EXPR
   node pointed to by EXPR_P.

      compound_lval
	      : min_lval '[' val ']'
	      | min_lval '.' ID
	      | compound_lval '[' val ']'
	      | compound_lval '.' ID

   This is not part of the original SIMPLE definition, which separates
   array and member references, but it seems reasonable to handle them
   together.
/* Gimplify the COMPONENT_REF, ARRAY_REF, REALPART_EXPR or IMAGPART_EXPR
   node pointed to by EXPR_P.

	compound_lval
		: min_lval '[' val ']'
		| min_lval '.' ID
		| compound_lval '[' val ']'
		| compound_lval '.' ID

   This is not part of the original SIMPLE definition, which separates
   array and member references, but it seems reasonable to handle them
   together.  Also, this way we don't run into problems with union
   aliasing; gcc requires that for accesses through a union to alias, the
   union reference must be explicit, which was not always the case when we
   were splitting up array and member refs.

   PRE_P points to the list where side effects that must happen before
     *EXPR_P should be stored.

   POST_P points to the list where side effects that must happen after
     *EXPR_P should be stored.  */

static enum gimplify_status
gimplify_compound_lval (tree *expr_p, tree *pre_p,
			tree *post_p, fallback_t fallback)
{
  tree *p;
  VEC(tree,heap) *stack;
  enum gimplify_status ret = GS_OK, tret;
  int i;

  /* Create a stack of the subexpressions so later we can walk them in
     order from inner to outer.  */
  stack = VEC_alloc (tree, heap, 10);

  /* We can handle anything that get_inner_reference can deal with.  */
  for (p = expr_p; ; p = &TREE_OPERAND (*p, 0))
    {
    restart:
      /* Fold INDIRECT_REFs now to turn them into ARRAY_REFs.  */
      if (TREE_CODE (*p) == INDIRECT_REF)
	*p = fold_indirect_ref (*p);

      if (handled_component_p (*p))
	;
      /* Expand DECL_VALUE_EXPR now.  In some cases that may expose
	 additional COMPONENT_REFs.  */
      else if ((TREE_CODE (*p) == VAR_DECL || TREE_CODE (*p) == PARM_DECL)
	       && gimplify_var_or_parm_decl (p) == GS_OK)
	goto restart;
      else
	break;

      VEC_safe_push (tree, heap, stack, *p);
    }

  gcc_assert (VEC_length (tree, stack));

  /* Now STACK is a stack of pointers to all the refs we've walked through
     and P points to the innermost expression.

     Java requires that we elaborated nodes in source order.  That
     means we must gimplify the inner expression followed by each of
     the indices, in order.  But we can't gimplify the inner
     expression until we deal with any variable bounds, sizes, or
     positions in order to deal with PLACEHOLDER_EXPRs.

     So we do this in three steps.  First we deal with the annotations
     for any variables in the components, then we gimplify the base,
     then we gimplify any indices, from left to right.  */
  for (i = VEC_length (tree, stack) - 1; i >= 0; i--)
    {
      tree t = VEC_index (tree, stack, i);

      if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
	{
	  /* Gimplify the low bound and element type size and put them into
	     the ARRAY_REF.  If these values are set, they have already been
	     gimplified.  */
	  if (!TREE_OPERAND (t, 2))
	    {
	      tree low = unshare_expr (array_ref_low_bound (t));
	      if (!is_gimple_min_invariant (low))
		{
	          TREE_OPERAND (t, 2) = low;
		  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
					is_gimple_formal_tmp_reg, fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }

	  if (!TREE_OPERAND (t, 3))
	    {
	      tree elmt_type = TREE_TYPE (TREE_TYPE (TREE_OPERAND (t, 0)));
	      tree elmt_size = unshare_expr (array_ref_element_size (t));
	      tree factor = size_int (TYPE_ALIGN_UNIT (elmt_type));

	      /* Divide the element size by the alignment of the element
		 type (above).  */
	      elmt_size = size_binop (EXACT_DIV_EXPR, elmt_size, factor);

	      if (!is_gimple_min_invariant (elmt_size))
		{
	          TREE_OPERAND (t, 3) = elmt_size;
		  tret = gimplify_expr (&TREE_OPERAND (t, 3), pre_p, post_p,
					is_gimple_formal_tmp_reg, fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }
	}
      else if (TREE_CODE (t) == COMPONENT_REF)
	{
	  /* Set the field offset into T and gimplify it.  */
	  if (!TREE_OPERAND (t, 2))
	    {
	      tree offset = unshare_expr (component_ref_field_offset (t));
	      tree field = TREE_OPERAND (t, 1);
	      tree factor
		= size_int (DECL_OFFSET_ALIGN (field) / BITS_PER_UNIT);

	      /* Divide the offset by its alignment.  */
	      offset = size_binop (EXACT_DIV_EXPR, offset, factor);

	      if (!is_gimple_min_invariant (offset))
		{
	          TREE_OPERAND (t, 2) = offset;
		  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
					is_gimple_formal_tmp_reg, fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }
	}
    }

  /* Step 2 is to gimplify the base expression.  Make sure lvalue is set
     so as to match the min_lval predicate.  Failure to do so may result
     in the creation of large aggregate temporaries.  */
  tret = gimplify_expr (p, pre_p, post_p, is_gimple_min_lval,
			fallback | fb_lvalue);
  ret = MIN (ret, tret);

  /* And finally, the indices and operands to BIT_FIELD_REF.  During this
     loop we also remove any useless conversions.  */
  for (; VEC_length (tree, stack) > 0; )
    {
      tree t = VEC_pop (tree, stack);

      if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
	{
	  /* Gimplify the dimension.
	     Temporary fix for gcc.c-torture/execute/20040313-1.c.
	     Gimplify non-constant array indices into a temporary
	     variable.
	     FIXME - The real fix is to gimplify post-modify
	     expressions into a minimal gimple lvalue.  However, that
	     exposes bugs in alias analysis.  The alias analyzer does
	     not handle &PTR->FIELD very well.  Will fix after the
	     branch is merged into mainline (dnovillo 2004-05-03).  */
	  if (!is_gimple_min_invariant (TREE_OPERAND (t, 1)))
	    {
	      tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p,
				    is_gimple_formal_tmp_reg, fb_rvalue);
	      ret = MIN (ret, tret);
	    }
	}
      else if (TREE_CODE (t) == BIT_FIELD_REF)
	{
	  tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p,
				is_gimple_val, fb_rvalue);
	  ret = MIN (ret, tret);
	  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
				is_gimple_val, fb_rvalue);
	  ret = MIN (ret, tret);
	}

      STRIP_USELESS_TYPE_CONVERSION (TREE_OPERAND (t, 0));

      /* The innermost expression P may have originally had
	 TREE_SIDE_EFFECTS set which would have caused all the outer
	 expressions in EXPR_P leading to P to also have had
	 TREE_SIDE_EFFECTS set.  */
      recalculate_side_effects (t);
    }

  /* Re-gimplify the base in case the steps above exposed something
     new (e.g. through DECL_VALUE_EXPR expansion).  */
  tret = gimplify_expr (p, pre_p, post_p, is_gimple_min_lval, fallback);
  ret = MIN (ret, tret);

  /* If the outermost expression is a COMPONENT_REF, canonicalize its type.  */
  if ((fallback & fb_rvalue) && TREE_CODE (*expr_p) == COMPONENT_REF)
    {
      canonicalize_component_ref (expr_p);
      ret = MIN (ret, GS_OK);
    }

  VEC_free (tree, heap, stack);

  return ret;
}
PRE_P points to the list where side effects that must happen before *EXPR_P should be stored. POST_P points to the list where side effects that must happen after *EXPR_P should be stored. WANT_VALUE is nonzero iff we want to use the value of this expression in another expression. */ static enum gimplify_status gimplify_self_mod_expr (tree *expr_p, tree *pre_p, tree *post_p, bool want_value) { enum tree_code code; tree lhs, lvalue, rhs, t1, post = NULL, *orig_post_p = post_p; bool postfix; enum tree_code arith_code; enum gimplify_status ret; code = TREE_CODE (*expr_p); gcc_assert (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR || code == PREINCREMENT_EXPR || code == PREDECREMENT_EXPR); /* Prefix or postfix? */ if (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR) /* Faster to treat as prefix if result is not used. */ postfix = want_value; else postfix = false; /* For postfix, make sure the inner expression's post side effects are executed after side effects from this expression. */ if (postfix) post_p = &post; /* Add or subtract? */ if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR) arith_code = PLUS_EXPR; else arith_code = MINUS_EXPR; /* Gimplify the LHS into a GIMPLE lvalue. */ lvalue = TREE_OPERAND (*expr_p, 0); ret = gimplify_expr (&lvalue, pre_p, post_p, is_gimple_lvalue, fb_lvalue); if (ret == GS_ERROR) return ret; /* Extract the operands to the arithmetic operation. */ lhs = lvalue; rhs = TREE_OPERAND (*expr_p, 1); /* For postfix operator, we evaluate the LHS to an rvalue and then use that as the result value and in the postqueue operation. 
*/ if (postfix) { ret = gimplify_expr (&lhs, pre_p, post_p, is_gimple_val, fb_rvalue); if (ret == GS_ERROR) return ret; } t1 = build2 (arith_code, TREE_TYPE (*expr_p), lhs, rhs); t1 = build2 (MODIFY_EXPR, TREE_TYPE (lvalue), lvalue, t1); if (postfix) { gimplify_and_add (t1, orig_post_p); append_to_statement_list (post, orig_post_p); *expr_p = lhs; return GS_ALL_DONE; } else { *expr_p = t1; return GS_OK; } } /* If *EXPR_P has a variable sized type, wrap it in a WITH_SIZE_EXPR. */ static void maybe_with_size_expr (tree *expr_p) { tree expr = *expr_p; tree type = TREE_TYPE (expr); tree size; /* If we've already wrapped this or the type is error_mark_node, we can't do anything. */ if (TREE_CODE (expr) == WITH_SIZE_EXPR || type == error_mark_node) return; /* If the size isn't known or is a constant, we have nothing to do. */ size = TYPE_SIZE_UNIT (type); if (!size || TREE_CODE (size) == INTEGER_CST) return; /* Otherwise, make a WITH_SIZE_EXPR. */ size = unshare_expr (size); size = SUBSTITUTE_PLACEHOLDER_IN_EXPR (size, expr); *expr_p = build2 (WITH_SIZE_EXPR, type, expr, size); } /* Subroutine of gimplify_call_expr: Gimplify a single argument. */ static enum gimplify_status gimplify_arg (tree *expr_p, tree *pre_p) { bool (*test) (tree); fallback_t fb; /* In general, we allow lvalues for function arguments to avoid extra overhead of copying large aggregates out of even larger aggregates into temporaries only to copy the temporaries to the argument list. Make optimizers happy by pulling out to temporaries those types that fit in registers. */ if (is_gimple_reg_type (TREE_TYPE (*expr_p))) test = is_gimple_val, fb = fb_rvalue; else test = is_gimple_lvalue, fb = fb_either; /* If this is a variable sized type, we must remember the size. */ maybe_with_size_expr (expr_p); /* There is a sequence point before a function call. Side effects in the argument list must occur before the actual call. 
/* Gimplify the CALL_EXPR node pointed to by EXPR_P.

   PRE_P points to the list where side effects that must happen before
     *EXPR_P should be stored.

   WANT_VALUE is true if the result of the call is desired.  */

static enum gimplify_status
gimplify_call_expr (tree *expr_p, tree *pre_p, bool want_value)
{
  tree decl;
  tree arglist;
  enum gimplify_status ret;

  gcc_assert (TREE_CODE (*expr_p) == CALL_EXPR);

  /* For reliable diagnostics during inlining, it is necessary that
     every call_expr be annotated with file and line.  */
  if (! EXPR_HAS_LOCATION (*expr_p))
    SET_EXPR_LOCATION (*expr_p, input_location);

  /* This may be a call to a builtin function.

     Builtin function calls may be transformed into different
     (and more efficient) builtin function calls under certain
     circumstances.  Unfortunately, gimplification can muck things
     up enough that the builtin expanders are not aware that certain
     transformations are still valid.

     So we attempt transformation/gimplification of the call before
     we gimplify the CALL_EXPR.  At this time we do not manage to
     transform all calls in the same manner as the expanders do, but
     we do transform most of them.  */
  decl = get_callee_fndecl (*expr_p);
  if (decl && DECL_BUILT_IN (decl))
    {
      tree arglist = TREE_OPERAND (*expr_p, 1);
      tree new = fold_builtin (decl, arglist, !want_value);

      if (new && new != *expr_p)
	{
	  /* There was a transformation of this call which computes the
	     same value, but in a more efficient way.  Return and try
	     again.  */
	  *expr_p = new;
	  return GS_OK;
	}

      if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (decl) == BUILT_IN_VA_START)
        {
	  if (!arglist || !TREE_CHAIN (arglist))
	    {
	      error ("too few arguments to function %<va_start%>");
	      *expr_p = build_empty_stmt ();
	      return GS_OK;
	    }

#ifdef KEY
/* bug 10904: This call produces __builtin_va_start(ap,0,0), and "wgen -m32"
   cannot handle the 2nd constant arg.  This would be a problem for wgen only
   for some cases (constructors and destructors) in C++, for other cases,
   wgen gets the TREE before this lowering process.  */
	  if (!flag_spin_file)
#endif
	  if (fold_builtin_next_arg (TREE_CHAIN (arglist)))
	    {
	      *expr_p = build_empty_stmt ();
	      return GS_OK;
	    }
	  /* Avoid gimplifying the second argument to va_start, which needs
	     to be the plain PARM_DECL.  */
	  return gimplify_arg (&TREE_VALUE (TREE_OPERAND (*expr_p, 1)), pre_p);
	}
    }

  /* There is a sequence point before the call, so any side effects in
     the calling expression must occur before the actual call.  Force
     gimplify_expr to use an internal post queue.  */
  ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, NULL,
		       is_gimple_call_addr, fb_rvalue);

  /* Arguments are evaluated in the order they will be pushed; when the
     target pushes in reverse, temporarily reverse the list so the
     gimplification order matches.  */
  if (PUSH_ARGS_REVERSED)
    TREE_OPERAND (*expr_p, 1) = nreverse (TREE_OPERAND (*expr_p, 1));
  for (arglist = TREE_OPERAND (*expr_p, 1); arglist;
       arglist = TREE_CHAIN (arglist))
    {
      enum gimplify_status t;

      t = gimplify_arg (&TREE_VALUE (arglist), pre_p);

      if (t == GS_ERROR)
	ret = GS_ERROR;
    }
  if (PUSH_ARGS_REVERSED)
    TREE_OPERAND (*expr_p, 1) = nreverse (TREE_OPERAND (*expr_p, 1));

  /* Try this again in case gimplification exposed something.  */
  if (ret != GS_ERROR)
    {
      decl = get_callee_fndecl (*expr_p);
      if (decl && DECL_BUILT_IN (decl))
	{
	  tree arglist = TREE_OPERAND (*expr_p, 1);
	  tree new = fold_builtin (decl, arglist, !want_value);

	  if (new && new != *expr_p)
	    {
	      /* There was a transformation of this call which computes the
		 same value, but in a more efficient way.  Return and try
		 again.  */
	      *expr_p = new;
	      return GS_OK;
	    }
	}
    }

  /* If the function is "const" or "pure", then clear TREE_SIDE_EFFECTS on its
     decl.  This allows us to eliminate redundant or useless
     calls to "const" functions.  */
  if (TREE_CODE (*expr_p) == CALL_EXPR
      && (call_expr_flags (*expr_p) & (ECF_CONST | ECF_PURE)))
    TREE_SIDE_EFFECTS (*expr_p) = 0;

  return ret;
}
*/ if (TREE_CODE (pred) == TRUTH_ANDIF_EXPR) { /* Turn if (a && b) into if (a); else goto no; if (b) goto yes; else goto no; (no:) */ if (false_label_p == NULL) false_label_p = &local_label; t = shortcut_cond_r (TREE_OPERAND (pred, 0), NULL, false_label_p); append_to_statement_list (t, &expr); t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p, false_label_p); append_to_statement_list (t, &expr); } else if (TREE_CODE (pred) == TRUTH_ORIF_EXPR) { /* Turn if (a || b) into if (a) goto yes; if (b) goto yes; else goto no; (yes:) */ if (true_label_p == NULL) true_label_p = &local_label; t = shortcut_cond_r (TREE_OPERAND (pred, 0), true_label_p, NULL); append_to_statement_list (t, &expr); t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p, false_label_p); append_to_statement_list (t, &expr); } else if (TREE_CODE (pred) == COND_EXPR) { /* As long as we're messing with gotos, turn if (a ? b : c) into if (a) if (b) goto yes; else goto no; else if (c) goto yes; else goto no; */ expr = build3 (COND_EXPR, void_type_node, TREE_OPERAND (pred, 0), shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p, false_label_p), shortcut_cond_r (TREE_OPERAND (pred, 2), true_label_p, false_label_p)); } else { expr = build3 (COND_EXPR, void_type_node, pred, build_and_jump (true_label_p), build_and_jump (false_label_p)); } if (local_label) { t = build1 (LABEL_EXPR, void_type_node, local_label); append_to_statement_list (t, &expr); } return expr; } static tree shortcut_cond_expr (tree expr) { tree pred = TREE_OPERAND (expr, 0); tree then_ = TREE_OPERAND (expr, 1); tree else_ = TREE_OPERAND (expr, 2); tree true_label, false_label, end_label, t; tree *true_label_p; tree *false_label_p; bool emit_end, emit_false, jump_over_else; bool then_se = then_ && TREE_SIDE_EFFECTS (then_); bool else_se = else_ && TREE_SIDE_EFFECTS (else_); /* First do simple transformations. */ if (!else_se) { /* If there is no 'else', turn (a && b) into if (a) if (b). 
*/ while (TREE_CODE (pred) == TRUTH_ANDIF_EXPR) { TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1); then_ = shortcut_cond_expr (expr); then_se = then_ && TREE_SIDE_EFFECTS (then_); pred = TREE_OPERAND (pred, 0); expr = build3 (COND_EXPR, void_type_node, pred, then_, NULL_TREE); } } if (!then_se) { /* If there is no 'then', turn if (a || b); else d into if (a); else if (b); else d. */ while (TREE_CODE (pred) == TRUTH_ORIF_EXPR) { TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1); else_ = shortcut_cond_expr (expr); else_se = else_ && TREE_SIDE_EFFECTS (else_); pred = TREE_OPERAND (pred, 0); expr = build3 (COND_EXPR, void_type_node, pred, NULL_TREE, else_); } } /* If we're done, great. */ if (TREE_CODE (pred) != TRUTH_ANDIF_EXPR && TREE_CODE (pred) != TRUTH_ORIF_EXPR) return expr; /* Otherwise we need to mess with gotos. Change if (a) c; else d; to if (a); else goto no; c; goto end; no: d; end: and recursively gimplify the condition. */ true_label = false_label = end_label = NULL_TREE; /* If our arms just jump somewhere, hijack those labels so we don't generate jumps to jumps. */ if (then_ && TREE_CODE (then_) == GOTO_EXPR && TREE_CODE (GOTO_DESTINATION (then_)) == LABEL_DECL) { true_label = GOTO_DESTINATION (then_); then_ = NULL; then_se = false; } if (else_ && TREE_CODE (else_) == GOTO_EXPR && TREE_CODE (GOTO_DESTINATION (else_)) == LABEL_DECL) { false_label = GOTO_DESTINATION (else_); else_ = NULL; else_se = false; } /* If we aren't hijacking a label for the 'then' branch, it falls through. */ if (true_label) true_label_p = &true_label; else true_label_p = NULL; /* The 'else' branch also needs a label if it contains interesting code. */ if (false_label || else_se) false_label_p = &false_label; else false_label_p = NULL; /* If there was nothing else in our arms, just forward the label(s). */ if (!then_se && !else_se) return shortcut_cond_r (pred, true_label_p, false_label_p); /* If our last subexpression already has a terminal label, reuse it. 
*/ if (else_se) expr = expr_last (else_); else if (then_se) expr = expr_last (then_); else expr = NULL; if (expr && TREE_CODE (expr) == LABEL_EXPR) end_label = LABEL_EXPR_LABEL (expr); /* If we don't care about jumping to the 'else' branch, jump to the end if the condition is false. */ if (!false_label_p) false_label_p = &end_label; /* We only want to emit these labels if we aren't hijacking them. */ emit_end = (end_label == NULL_TREE); emit_false = (false_label == NULL_TREE); /* We only emit the jump over the else clause if we have to--if the then clause may fall through. Otherwise we can wind up with a useless jump and a useless label at the end of gimplified code, which will cause us to think that this conditional as a whole falls through even if it doesn't. If we then inline a function which ends with such a condition, that can cause us to issue an inappropriate warning about control reaching the end of a non-void function. */ jump_over_else = block_may_fallthru (then_); pred = shortcut_cond_r (pred, true_label_p, false_label_p); expr = NULL; append_to_statement_list (pred, &expr); append_to_statement_list (then_, &expr); if (else_se) { if (jump_over_else) { t = build_and_jump (&end_label); append_to_statement_list (t, &expr); } if (emit_false) { t = build1 (LABEL_EXPR, void_type_node, false_label); append_to_statement_list (t, &expr); } append_to_statement_list (else_, &expr); } if (emit_end && end_label) { t = build1 (LABEL_EXPR, void_type_node, end_label); append_to_statement_list (t, &expr); } return expr; } /* EXPR is used in a boolean context; make sure it has BOOLEAN_TYPE. */ tree gimple_boolify (tree expr) { tree type = TREE_TYPE (expr); if (TREE_CODE (type) == BOOLEAN_TYPE) return expr; switch (TREE_CODE (expr)) { case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case TRUTH_XOR_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: /* Also boolify the arguments of truth exprs. 
*/ TREE_OPERAND (expr, 1) = gimple_boolify (TREE_OPERAND (expr, 1)); /* FALLTHRU */ case TRUTH_NOT_EXPR: TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0)); /* FALLTHRU */ case EQ_EXPR: case NE_EXPR: case LE_EXPR: case GE_EXPR: case LT_EXPR: case GT_EXPR: /* These expressions always produce boolean results. */ TREE_TYPE (expr) = boolean_type_node; return expr; default: /* Other expressions that get here must have boolean values, but might need to be converted to the appropriate mode. */ return fold_convert (boolean_type_node, expr); } } /* Convert the conditional expression pointed to by EXPR_P '(p) ? a : b;' into if (p) if (p) t1 = a; a; else or else t1 = b; b; t1; The second form is used when *EXPR_P is of type void. TARGET is the tree for T1 above. PRE_P points to the list where side effects that must happen before *EXPR_P should be stored. */ static enum gimplify_status gimplify_cond_expr (tree *expr_p, tree *pre_p, fallback_t fallback) { tree expr = *expr_p; tree tmp, tmp2, type; enum gimplify_status ret; type = TREE_TYPE (expr); /* If this COND_EXPR has a value, copy the values into a temporary within the arms. */ if (! VOID_TYPE_P (type)) { tree result; if ((fallback & fb_lvalue) == 0) { result = tmp2 = tmp = create_tmp_var (TREE_TYPE (expr), "iftmp"); ret = GS_ALL_DONE; } else { tree type = build_pointer_type (TREE_TYPE (expr)); if (TREE_TYPE (TREE_OPERAND (expr, 1)) != void_type_node) TREE_OPERAND (expr, 1) = build_fold_addr_expr (TREE_OPERAND (expr, 1)); if (TREE_TYPE (TREE_OPERAND (expr, 2)) != void_type_node) TREE_OPERAND (expr, 2) = build_fold_addr_expr (TREE_OPERAND (expr, 2)); tmp2 = tmp = create_tmp_var (type, "iftmp"); expr = build3 (COND_EXPR, void_type_node, TREE_OPERAND (expr, 0), TREE_OPERAND (expr, 1), TREE_OPERAND (expr, 2)); result = build_fold_indirect_ref (tmp); ret = GS_ALL_DONE; } /* Build the then clause, 't1 = a;'. But don't build an assignment if this branch is void; in C++ it can be, if it's a throw. 
*/ if (TREE_TYPE (TREE_OPERAND (expr, 1)) != void_type_node) TREE_OPERAND (expr, 1) = build2 (MODIFY_EXPR, void_type_node, tmp, TREE_OPERAND (expr, 1)); /* Build the else clause, 't1 = b;'. */ if (TREE_TYPE (TREE_OPERAND (expr, 2)) != void_type_node) TREE_OPERAND (expr, 2) = build2 (MODIFY_EXPR, void_type_node, tmp2, TREE_OPERAND (expr, 2)); TREE_TYPE (expr) = void_type_node; recalculate_side_effects (expr); /* Move the COND_EXPR to the prequeue. */ gimplify_and_add (expr, pre_p); *expr_p = result; return ret; } /* Make sure the condition has BOOLEAN_TYPE. */ TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0)); /* Break apart && and || conditions. */ if (TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ANDIF_EXPR || TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ORIF_EXPR) { expr = shortcut_cond_expr (expr); if (expr != *expr_p) { *expr_p = expr; /* We can't rely on gimplify_expr to re-gimplify the expanded form properly, as cleanups might cause the target labels to be wrapped in a TRY_FINALLY_EXPR. To prevent that, we need to set up a conditional context. */ gimple_push_condition (); gimplify_stmt (expr_p); gimple_pop_condition (pre_p); return GS_ALL_DONE; } } /* Now do the normal gimplification. 
*/ ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, NULL, is_gimple_condexpr, fb_rvalue); gimple_push_condition (); gimplify_to_stmt_list (&TREE_OPERAND (expr, 1)); gimplify_to_stmt_list (&TREE_OPERAND (expr, 2)); recalculate_side_effects (expr); gimple_pop_condition (pre_p); if (ret == GS_ERROR) ; else if (TREE_SIDE_EFFECTS (TREE_OPERAND (expr, 1))) ret = GS_ALL_DONE; else if (TREE_SIDE_EFFECTS (TREE_OPERAND (expr, 2))) /* Rewrite "if (a); else b" to "if (!a) b" */ { TREE_OPERAND (expr, 0) = invert_truthvalue (TREE_OPERAND (expr, 0)); ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, NULL, is_gimple_condexpr, fb_rvalue); tmp = TREE_OPERAND (expr, 1); TREE_OPERAND (expr, 1) = TREE_OPERAND (expr, 2); TREE_OPERAND (expr, 2) = tmp; } else /* Both arms are empty; replace the COND_EXPR with its predicate. */ expr = TREE_OPERAND (expr, 0); *expr_p = expr; return ret; } /* A subroutine of gimplify_modify_expr. Replace a MODIFY_EXPR with a call to __builtin_memcpy. */ static enum gimplify_status gimplify_modify_expr_to_memcpy (tree *expr_p, tree size, bool want_value) { tree args, t, to, to_ptr, from; to = TREE_OPERAND (*expr_p, 0); from = TREE_OPERAND (*expr_p, 1); args = tree_cons (NULL, size, NULL); t = build_fold_addr_expr (from); args = tree_cons (NULL, t, args); to_ptr = build_fold_addr_expr (to); args = tree_cons (NULL, to_ptr, args); t = implicit_built_in_decls[BUILT_IN_MEMCPY]; t = build_function_call_expr (t, args); if (want_value) { t = build1 (NOP_EXPR, TREE_TYPE (to_ptr), t); t = build1 (INDIRECT_REF, TREE_TYPE (to), t); } *expr_p = t; return GS_OK; } /* A subroutine of gimplify_modify_expr. Replace a MODIFY_EXPR with a call to __builtin_memset. In this case we know that the RHS is a CONSTRUCTOR with an empty element list. 
*/ static enum gimplify_status gimplify_modify_expr_to_memset (tree *expr_p, tree size, bool want_value) { tree args, t, to, to_ptr; to = TREE_OPERAND (*expr_p, 0); args = tree_cons (NULL, size, NULL); args = tree_cons (NULL, integer_zero_node, args); to_ptr = build_fold_addr_expr (to); args = tree_cons (NULL, to_ptr, args); t = implicit_built_in_decls[BUILT_IN_MEMSET]; t = build_function_call_expr (t, args); if (want_value) { t = build1 (NOP_EXPR, TREE_TYPE (to_ptr), t); t = build1 (INDIRECT_REF, TREE_TYPE (to), t); } *expr_p = t; return GS_OK; } /* A subroutine of gimplify_init_ctor_preeval. Called via walk_tree, determine, cautiously, if a CONSTRUCTOR overlaps the lhs of an assignment. Returns non-null if we detect a potential overlap. */ struct gimplify_init_ctor_preeval_data { /* The base decl of the lhs object. May be NULL, in which case we have to assume the lhs is indirect. */ tree lhs_base_decl; /* The alias set of the lhs object. */ int lhs_alias_set; }; static tree gimplify_init_ctor_preeval_1 (tree *tp, int *walk_subtrees, void *xdata) { struct gimplify_init_ctor_preeval_data *data = (struct gimplify_init_ctor_preeval_data *) xdata; tree t = *tp; /* If we find the base object, obviously we have overlap. */ if (data->lhs_base_decl == t) return t; /* If the constructor component is indirect, determine if we have a potential overlap with the lhs. The only bits of information we have to go on at this point are addressability and alias sets. */ if (TREE_CODE (t) == INDIRECT_REF && (!data->lhs_base_decl || TREE_ADDRESSABLE (data->lhs_base_decl)) && alias_sets_conflict_p (data->lhs_alias_set, get_alias_set (t))) return t; /* If the constructor component is a call, determine if it can hide a potential overlap with the lhs through an INDIRECT_REF like above. 
*/ if (TREE_CODE (t) == CALL_EXPR) { tree type, fntype = TREE_TYPE (TREE_TYPE (TREE_OPERAND (t, 0))); for (type = TYPE_ARG_TYPES (fntype); type; type = TREE_CHAIN (type)) if (POINTER_TYPE_P (TREE_VALUE (type)) && (!data->lhs_base_decl || TREE_ADDRESSABLE (data->lhs_base_decl)) && alias_sets_conflict_p (data->lhs_alias_set, get_alias_set (TREE_TYPE (TREE_VALUE (type))))) return t; } if (IS_TYPE_OR_DECL_P (t)) *walk_subtrees = 0; return NULL; } /* A subroutine of gimplify_init_constructor. Pre-evaluate *EXPR_P, force values that overlap with the lhs (as described by *DATA) into temporaries. */ static void gimplify_init_ctor_preeval (tree *expr_p, tree *pre_p, tree *post_p, struct gimplify_init_ctor_preeval_data *data) { enum gimplify_status one; /* If the value is invariant, then there's nothing to pre-evaluate. But ensure it doesn't have any side-effects since a SAVE_EXPR is invariant but has side effects and might contain a reference to the object we're initializing. */ if (TREE_INVARIANT (*expr_p) && !TREE_SIDE_EFFECTS (*expr_p)) return; /* If the type has non-trivial constructors, we can't pre-evaluate. */ if (TREE_ADDRESSABLE (TREE_TYPE (*expr_p))) return; /* Recurse for nested constructors. */ if (TREE_CODE (*expr_p) == CONSTRUCTOR) { unsigned HOST_WIDE_INT ix; constructor_elt *ce; VEC(constructor_elt,gc) *v = CONSTRUCTOR_ELTS (*expr_p); for (ix = 0; VEC_iterate (constructor_elt, v, ix, ce); ix++) gimplify_init_ctor_preeval (&ce->value, pre_p, post_p, data); return; } /* If this is a variable sized type, we must remember the size. */ maybe_with_size_expr (expr_p); /* Gimplify the constructor element to something appropriate for the rhs of a MODIFY_EXPR. Given that we know the lhs is an aggregate, we know the gimplifier will consider this a store to memory. Doing this gimplification now means that we won't have to deal with complicated language-specific trees, nor trees like SAVE_EXPR that can induce exponential search behavior. 
*/ one = gimplify_expr (expr_p, pre_p, post_p, is_gimple_mem_rhs, fb_rvalue); if (one == GS_ERROR) { *expr_p = NULL; return; } /* If we gimplified to a bare decl, we can be sure that it doesn't overlap with the lhs, since "a = { .x=a }" doesn't make sense. This will always be true for all scalars, since is_gimple_mem_rhs insists on a temporary variable for them. */ if (DECL_P (*expr_p)) return; /* If this is of variable size, we have no choice but to assume it doesn't overlap since we can't make a temporary for it. */ if (TREE_CODE (TYPE_SIZE (TREE_TYPE (*expr_p))) != INTEGER_CST) return; /* Otherwise, we must search for overlap ... */ if (!walk_tree (expr_p, gimplify_init_ctor_preeval_1, data, NULL)) return; /* ... and if found, force the value into a temporary. */ *expr_p = get_formal_tmp_var (*expr_p, pre_p); } /* A subroutine of gimplify_init_ctor_eval. Create a loop for a RANGE_EXPR in a CONSTRUCTOR for an array. var = lower; loop_entry: object[var] = value; if (var == upper) goto loop_exit; var = var + 1; goto loop_entry; loop_exit: We increment var _after_ the loop exit check because we might otherwise fail if upper == TYPE_MAX_VALUE (type for upper). Note that we never have to deal with SAVE_EXPRs here, because this has already been taken care of for us, in gimplify_init_ctor_preeval(). */ static void gimplify_init_ctor_eval (tree, VEC(constructor_elt,gc) *, tree *, bool); static void gimplify_init_ctor_eval_range (tree object, tree lower, tree upper, tree value, tree array_elt_type, tree *pre_p, bool cleared) { tree loop_entry_label, loop_exit_label; tree var, var_type, cref; loop_entry_label = create_artificial_label (); loop_exit_label = create_artificial_label (); /* Create and initialize the index variable. */ var_type = TREE_TYPE (upper); var = create_tmp_var (var_type, NULL); append_to_statement_list (build2 (MODIFY_EXPR, var_type, var, lower), pre_p); /* Add the loop entry label. 
*/
  append_to_statement_list (build1 (LABEL_EXPR,
				    void_type_node, loop_entry_label),
			    pre_p);

  /* Build the reference.  */
  cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
		 var, NULL_TREE, NULL_TREE);

  /* If we are a constructor, just call gimplify_init_ctor_eval to do
     the store.  Otherwise just assign value to the reference.  */

  if (TREE_CODE (value) == CONSTRUCTOR)
    /* NB we might have to call ourself recursively through
       gimplify_init_ctor_eval if the value is a constructor.  */
    gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
			     pre_p, cleared);
  else
    append_to_statement_list (build2 (MODIFY_EXPR, TREE_TYPE (cref),
				      cref, value),
			      pre_p);

  /* We exit the loop when the index var is equal to the upper bound.  */
  gimplify_and_add (build3 (COND_EXPR, void_type_node,
			    build2 (EQ_EXPR, boolean_type_node, var, upper),
			    build1 (GOTO_EXPR, void_type_node,
				    loop_exit_label),
			    NULL_TREE),
		    pre_p);

  /* Otherwise, increment the index var...  */
  append_to_statement_list (build2 (MODIFY_EXPR, var_type, var,
				    build2 (PLUS_EXPR, var_type, var,
					    fold_convert (var_type,
							  integer_one_node))),
			    pre_p);

  /* ...and jump back to the loop entry.  */
  append_to_statement_list (build1 (GOTO_EXPR, void_type_node,
				    loop_entry_label),
			    pre_p);

  /* Add the loop exit label.  */
  append_to_statement_list (build1 (LABEL_EXPR,
				    void_type_node, loop_exit_label),
			    pre_p);
}

/* Return true if FDECL is accessing a field that is zero sized.  */

static bool
zero_sized_field_decl (tree fdecl)
{
  if (TREE_CODE (fdecl) == FIELD_DECL && DECL_SIZE (fdecl)
      && integer_zerop (DECL_SIZE (fdecl)))
    return true;
  return false;
}

/* Return true if TYPE is zero sized (an aggregate whose size folds to
   the constant zero).  */

static bool
zero_sized_type (tree type)
{
  if (AGGREGATE_TYPE_P (type) && TYPE_SIZE (type)
      && integer_zerop (TYPE_SIZE (type)))
    return true;
  return false;
}

/* A subroutine of gimplify_init_constructor.  Generate individual
   MODIFY_EXPRs for a CONSTRUCTOR.  OBJECT is the LHS against which the
   assignments should happen.  ELTS is the CONSTRUCTOR_ELTS of the
   CONSTRUCTOR.
   CLEARED is true if the entire LHS object has been zeroed first.  */

static void
gimplify_init_ctor_eval (tree object, VEC(constructor_elt,gc) *elts,
			 tree *pre_p, bool cleared)
{
  tree array_elt_type = NULL;
  unsigned HOST_WIDE_INT ix;
  tree purpose, value;

  if (TREE_CODE (TREE_TYPE (object)) == ARRAY_TYPE)
    array_elt_type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (object)));

  FOR_EACH_CONSTRUCTOR_ELT (elts, ix, purpose, value)
    {
      tree cref, init;

      /* NULL values are created above for gimplification errors.  */
      if (value == NULL)
	continue;

      /* An element the block-clear already wrote needs no store.  */
      if (cleared && initializer_zerop (value))
	continue;

      /* ??? Here's to hoping the front end fills in all of the indices,
	 so we don't have to figure out what's missing ourselves.  */
      gcc_assert (purpose);

      /* Skip zero-sized fields, unless value has side-effects.  This can
	 happen with calls to functions returning a zero-sized type, which
	 we shouldn't discard.  As a number of downstream passes don't
	 expect sets of zero-sized fields, we rely on the gimplification of
	 the MODIFY_EXPR we make below to drop the assignment statement.  */
      if (! TREE_SIDE_EFFECTS (value) && zero_sized_field_decl (purpose))
	continue;

      /* If we have a RANGE_EXPR, we have to build a loop to assign the
	 whole range.  */
      if (TREE_CODE (purpose) == RANGE_EXPR)
	{
	  tree lower = TREE_OPERAND (purpose, 0);
	  tree upper = TREE_OPERAND (purpose, 1);

	  /* If the lower bound is equal to upper, just treat it as if
	     upper was the index.
	     */
	  if (simple_cst_equal (lower, upper))
	    purpose = upper;
	  else
	    {
	      gimplify_init_ctor_eval_range (object, lower, upper, value,
					     array_elt_type, pre_p, cleared);
	      continue;
	    }
	}

      if (array_elt_type)
	{
	  cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
			 purpose, NULL_TREE, NULL_TREE);
	}
      else
	{
	  gcc_assert (TREE_CODE (purpose) == FIELD_DECL);
	  cref = build3 (COMPONENT_REF, TREE_TYPE (purpose),
			 unshare_expr (object), purpose, NULL_TREE);
	}

      if (TREE_CODE (value) == CONSTRUCTOR
	  && TREE_CODE (TREE_TYPE (value)) != VECTOR_TYPE)
	gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
				 pre_p, cleared);
      else
	{
	  init = build2 (INIT_EXPR, TREE_TYPE (cref), cref, value);
	  gimplify_and_add (init, pre_p);
	}
    }
}

/* A subroutine of gimplify_modify_expr.  Break out elements of a
   CONSTRUCTOR used as an initializer into separate MODIFY_EXPRs.

   Note that we still need to clear any elements that don't have explicit
   initializers, so if not all elements are initialized we keep the
   original MODIFY_EXPR, we just remove all of the constructor elements.  */

static enum gimplify_status
gimplify_init_constructor (tree *expr_p, tree *pre_p,
			   tree *post_p, bool want_value)
{
  tree object;
  tree ctor = TREE_OPERAND (*expr_p, 1);
  tree type = TREE_TYPE (ctor);
  enum gimplify_status ret;
  VEC(constructor_elt,gc) *elts;

  if (TREE_CODE (ctor) != CONSTRUCTOR)
    return GS_UNHANDLED;

  ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
		       is_gimple_lvalue, fb_lvalue);
  if (ret == GS_ERROR)
    return ret;
  object = TREE_OPERAND (*expr_p, 0);

  elts = CONSTRUCTOR_ELTS (ctor);

  ret = GS_ALL_DONE;
  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
    case ARRAY_TYPE:
      {
	struct gimplify_init_ctor_preeval_data preeval_data;
	HOST_WIDE_INT num_type_elements, num_ctor_elements;
	HOST_WIDE_INT num_nonzero_elements;
	bool cleared, valid_const_initializer;

	/* Aggregate types must lower constructors to initialization of
	   individual elements.
	   The exception is that a CONSTRUCTOR node with no elements
	   indicates zero-initialization of the whole.  */
	if (VEC_empty (constructor_elt, elts))
	  break;

	/* Fetch information about the constructor to direct later
	   processing.  We might want to make static versions of it in
	   various cases, and can only do so if it known to be a valid
	   constant initializer.  */
	valid_const_initializer
	  = categorize_ctor_elements (ctor, &num_nonzero_elements,
				      &num_ctor_elements, &cleared);

	/* If a const aggregate variable is being initialized, then it
	   should never be a loss to promote the variable to be static.  */
	if (valid_const_initializer
	    && num_nonzero_elements > 1
	    && TREE_READONLY (object)
	    && TREE_CODE (object) == VAR_DECL)
	  {
	    DECL_INITIAL (object) = ctor;
	    TREE_STATIC (object) = 1;
	    if (!DECL_NAME (object))
	      DECL_NAME (object) = create_tmp_var_name ("C");
	    walk_tree (&DECL_INITIAL (object), force_labels_r, NULL, NULL);

	    /* ??? C++ doesn't automatically append a .<number> to the
	       assembler name, and even when it does, it looks at FE private
	       data structures to figure out what that number should be,
	       which are not set for this variable.  I suppose this is
	       important for local statics for inline functions, which
	       aren't "local" in the object file sense.  So in order to get
	       a unique TU-local symbol, we must invoke the lhd version
	       now.  */
	    lhd_set_decl_assembler_name (object);

	    *expr_p = NULL_TREE;
	    break;
	  }

	/* If there are "lots" of initialized elements, even discounting
	   those that are not address constants (and thus *must* be
	   computed at runtime), then partition the constructor into
	   constant and non-constant parts.  Block copy the constant
	   parts in, then generate code for the non-constant parts.  */
	/* TODO.  There's code in cp/typeck.c to do this.  */

	num_type_elements = count_type_elements (type, true);

	/* If count_type_elements could not determine number of type
	   elements for a constant-sized object, assume clearing is needed.
	   Don't do this for variable-sized objects, as store_constructor
	   will ignore the clearing of variable-sized objects.  */
	if (num_type_elements < 0 && int_size_in_bytes (type) >= 0)
	  cleared = true;
	/* If there are "lots" of zeros, then block clear the object first.  */
	else if (num_type_elements - num_nonzero_elements > CLEAR_RATIO
		 && num_nonzero_elements < num_type_elements/4)
	  cleared = true;
	/* ??? This bit ought not be needed.  For any element not present
	   in the initializer, we should simply set them to zero.  Except
	   we'd need to *find* the elements that are not present, and that
	   requires trickery to avoid quadratic compile-time behavior in
	   large cases or excessive memory use in small cases.  */
	else if (num_ctor_elements < num_type_elements)
	  cleared = true;

	/* If there are "lots" of initialized elements, and all of them
	   are valid address constants, then the entire initializer can
	   be dropped to memory, and then memcpy'd out.  Don't do this
	   for sparse arrays, though, as it's more efficient to follow
	   the standard CONSTRUCTOR behavior of memset followed by
	   individual element initialization.  */
	if (valid_const_initializer && !cleared)
	  {
	    HOST_WIDE_INT size = int_size_in_bytes (type);
	    unsigned int align;

	    /* ??? We can still get unbounded array types, at least
	       from the C++ front end.  This seems wrong, but attempt
	       to work around it for now.  */
	    if (size < 0)
	      {
		size = int_size_in_bytes (TREE_TYPE (object));
		if (size >= 0)
		  TREE_TYPE (ctor) = type = TREE_TYPE (object);
	      }

	    /* Find the maximum alignment we can assume for the object.  */
	    /* ??? Make use of DECL_OFFSET_ALIGN.
	       */
	    if (DECL_P (object))
	      align = DECL_ALIGN (object);
	    else
	      align = TYPE_ALIGN (type);

	    /* When the copy is too big to do inline, materialize the
	       initializer as a static constant and let the RHS become a
	       reference to it (a library memcpy will do the store).  */
	    if (size > 0 && !can_move_by_pieces (size, align))
	      {
		tree new = create_tmp_var_raw (type, "C");

		gimple_add_tmp_var (new);
		TREE_STATIC (new) = 1;
		TREE_READONLY (new) = 1;
		DECL_INITIAL (new) = ctor;
		if (align > DECL_ALIGN (new))
		  {
		    DECL_ALIGN (new) = align;
		    DECL_USER_ALIGN (new) = 1;
		  }
	        walk_tree (&DECL_INITIAL (new), force_labels_r, NULL, NULL);

		TREE_OPERAND (*expr_p, 1) = new;

		/* This is no longer an assignment of a CONSTRUCTOR, but
		   we still may have processing to do on the LHS.  So
		   pretend we didn't do anything here to let that happen.  */
		return GS_UNHANDLED;
	      }
	  }

	/* If there are nonzero elements, pre-evaluate to capture elements
	   overlapping with the lhs into temporaries.  We must do this before
	   clearing to fetch the values before they are zeroed-out.  */
	if (num_nonzero_elements > 0)
	  {
	    preeval_data.lhs_base_decl = get_base_address (object);
	    if (!DECL_P (preeval_data.lhs_base_decl))
	      preeval_data.lhs_base_decl = NULL;
	    preeval_data.lhs_alias_set = get_alias_set (object);

	    gimplify_init_ctor_preeval (&TREE_OPERAND (*expr_p, 1),
					pre_p, post_p, &preeval_data);
	  }

	if (cleared)
	  {
	    /* Zap the CONSTRUCTOR element list, which simplifies this case.
	       Note that we still have to gimplify, in order to handle the
	       case of variable sized types.  Avoid shared tree structures.  */
	    CONSTRUCTOR_ELTS (ctor) = NULL;
	    object = unshare_expr (object);
	    gimplify_stmt (expr_p);
	    append_to_statement_list (*expr_p, pre_p);
	  }

	/* If we have not block cleared the object, or if there are nonzero
	   elements in the constructor, add assignments to the individual
	   scalar fields of the object.  */
	if (!cleared || num_nonzero_elements > 0)
	  gimplify_init_ctor_eval (object, elts, pre_p, cleared);

	*expr_p = NULL_TREE;
      }
      break;

    case COMPLEX_TYPE:
      {
	tree r, i;

	/* Extract the real and imaginary parts out of the ctor.
	   */
	gcc_assert (VEC_length (constructor_elt, elts) == 2);
	r = VEC_index (constructor_elt, elts, 0)->value;
	i = VEC_index (constructor_elt, elts, 1)->value;
	if (r == NULL || i == NULL)
	  {
	    tree zero = fold_convert (TREE_TYPE (type), integer_zero_node);
	    if (r == NULL)
	      r = zero;
	    if (i == NULL)
	      i = zero;
	  }

	/* Complex types have either COMPLEX_CST or COMPLEX_EXPR to
	   represent creation of a complex value.  */
	if (TREE_CONSTANT (r) && TREE_CONSTANT (i))
	  {
	    ctor = build_complex (type, r, i);
	    TREE_OPERAND (*expr_p, 1) = ctor;
	  }
	else
	  {
	    ctor = build2 (COMPLEX_EXPR, type, r, i);
	    TREE_OPERAND (*expr_p, 1) = ctor;
	    ret = gimplify_expr (&TREE_OPERAND (*expr_p, 1),
				 pre_p, post_p,
				 rhs_predicate_for (TREE_OPERAND (*expr_p, 0)),
				 fb_rvalue);
	  }
      }
      break;

    case VECTOR_TYPE:
      {
	unsigned HOST_WIDE_INT ix;
	constructor_elt *ce;

	/* Go ahead and simplify constant constructors to VECTOR_CST.  */
	if (TREE_CONSTANT (ctor))
	  {
	    bool constant_p = true;
	    tree value;

	    /* Even when ctor is constant, it might contain non-*_CST
	       elements (e.g. { 1.0/0.0 - 1.0/0.0, 0.0 }) and those don't
	       belong into VECTOR_CST nodes.  */
	    FOR_EACH_CONSTRUCTOR_VALUE (elts, ix, value)
	      if (!CONSTANT_CLASS_P (value))
		{
		  constant_p = false;
		  break;
		}

	    if (constant_p)
	      {
		TREE_OPERAND (*expr_p, 1) = build_vector_from_ctor (type, elts);
		break;
	      }

	    /* Don't reduce a TREE_CONSTANT vector ctor even if we can't
	       make a VECTOR_CST.  It won't do anything for us, and it'll
	       prevent us from representing it as a single constant.  */
	    break;
	  }

	/* Vector types use CONSTRUCTOR all the way through gimple
	  compilation as a general initializer.  */
	for (ix = 0; VEC_iterate (constructor_elt, elts, ix, ce); ix++)
	  {
	    enum gimplify_status tret;
	    tret = gimplify_expr (&ce->value, pre_p, post_p,
				  is_gimple_val, fb_rvalue);
	    if (tret == GS_ERROR)
	      ret = GS_ERROR;
	  }
      }
      break;

    default:
      /* So how did we get a CONSTRUCTOR for a scalar type?
	 */
      gcc_unreachable ();
    }

  if (ret == GS_ERROR)
    return GS_ERROR;
  else if (want_value)
    {
      append_to_statement_list (*expr_p, pre_p);
      *expr_p = object;
      return GS_OK;
    }
  else
    return GS_ALL_DONE;
}

/* Given a pointer value OP0, return a simplified version of an
   indirection through OP0, or NULL_TREE if no simplification is
   possible.  This may only be applied to a rhs of an expression.
   Note that the resulting type may be different from the type pointed
   to in the sense that it is still compatible from the langhooks
   point of view.  */

static tree
fold_indirect_ref_rhs (tree t)
{
  tree type = TREE_TYPE (TREE_TYPE (t));
  tree sub = t;
  tree subtype;

  STRIP_USELESS_TYPE_CONVERSION (sub);
  subtype = TREE_TYPE (sub);
  if (!POINTER_TYPE_P (subtype))
    return NULL_TREE;

  if (TREE_CODE (sub) == ADDR_EXPR)
    {
      tree op = TREE_OPERAND (sub, 0);
      tree optype = TREE_TYPE (op);
      /* *&p => p */
      if (lang_hooks.types_compatible_p (type, optype))
	return op;
      /* *(foo *)&fooarray => fooarray[0] */
      else if (TREE_CODE (optype) == ARRAY_TYPE
	       && lang_hooks.types_compatible_p (type, TREE_TYPE (optype)))
       {
	 tree type_domain = TYPE_DOMAIN (optype);
	 tree min_val = size_zero_node;
	 if (type_domain && TYPE_MIN_VALUE (type_domain))
	   min_val = TYPE_MIN_VALUE (type_domain);
	 return build4 (ARRAY_REF, type, op, min_val, NULL_TREE, NULL_TREE);
       }
    }

  /* *(foo *)fooarrptr => (*fooarrptr)[0] */
  if (TREE_CODE (TREE_TYPE (subtype)) == ARRAY_TYPE
      && lang_hooks.types_compatible_p (type,
					TREE_TYPE (TREE_TYPE (subtype))))
    {
      tree type_domain;
      tree min_val = size_zero_node;
      tree osub = sub;
      sub = fold_indirect_ref_rhs (sub);
      if (! sub)
	sub = build1 (INDIRECT_REF, TREE_TYPE (subtype), osub);
      type_domain = TYPE_DOMAIN (TREE_TYPE (sub));
      if (type_domain && TYPE_MIN_VALUE (type_domain))
	min_val = TYPE_MIN_VALUE (type_domain);
      return build4 (ARRAY_REF, type, sub, min_val, NULL_TREE, NULL_TREE);
    }

  return NULL_TREE;
}

/* Subroutine of gimplify_modify_expr to do simplifications of MODIFY_EXPRs
   based on the code of the RHS.
We loop for as long as something changes.  */

static enum gimplify_status
gimplify_modify_expr_rhs (tree *expr_p, tree *from_p, tree *to_p, tree *pre_p,
			  tree *post_p, bool want_value)
{
  enum gimplify_status ret = GS_OK;

  while (ret != GS_UNHANDLED)
    switch (TREE_CODE (*from_p))
      {
      case INDIRECT_REF:
	{
	  /* If we have code like

	     *(const A*)(A*)&x

	     where the type of "x" is a (possibly cv-qualified variant
	     of "A"), treat the entire expression as identical to "x".
	     This kind of code arises in C++ when an object is bound
	     to a const reference, and if "x" is a TARGET_EXPR we want
	     to take advantage of the optimization below.  */
	  tree t = fold_indirect_ref_rhs (TREE_OPERAND (*from_p, 0));
	  if (t)
	    {
	      *from_p = t;
	      ret = GS_OK;
	    }
	  else
	    ret = GS_UNHANDLED;
	  break;
	}

      case TARGET_EXPR:
	{
	  /* If we are initializing something from a TARGET_EXPR, strip the
	     TARGET_EXPR and initialize it directly, if possible.  This can't
	     be done if the initializer is void, since that implies that the
	     temporary is set in some non-trivial way.

	     ??? What about code that pulls out the temp and uses it
	     elsewhere? I think that such code never uses the TARGET_EXPR as
	     an initializer.  If I'm wrong, we'll die because the temp won't
	     have any RTL.  In that case, I guess we'll need to replace
	     references somehow.  */
	  tree init = TARGET_EXPR_INITIAL (*from_p);

	  if (!VOID_TYPE_P (TREE_TYPE (init)))
	    {
	      *from_p = init;
	      ret = GS_OK;
	    }
	  else
	    ret = GS_UNHANDLED;
	}
	break;

      case COMPOUND_EXPR:
	/* Remove any COMPOUND_EXPR in the RHS so the following cases will be
	   caught.  */
	gimplify_compound_expr (from_p, pre_p, true);
	ret = GS_OK;
	break;

      case CONSTRUCTOR:
	/* If we're initializing from a CONSTRUCTOR, break this into
	   individual MODIFY_EXPRs.  */
	return gimplify_init_constructor (expr_p, pre_p, post_p, want_value);

      case COND_EXPR:
	/* If we're assigning to a non-register type, push the assignment
	   down into the branches.  This is mandatory for ADDRESSABLE types,
	   since we cannot generate temporaries for such, but it saves a
	   copy in other cases as well.
	   */
	if (!is_gimple_reg_type (TREE_TYPE (*from_p)))
	  {
	    /* This code should mirror the code in gimplify_cond_expr.  */
	    enum tree_code code = TREE_CODE (*expr_p);
	    tree cond = *from_p;
	    tree result = *to_p;

	    ret = gimplify_expr (&result, pre_p, post_p,
				 is_gimple_min_lval, fb_lvalue);
	    if (ret != GS_ERROR)
	      ret = GS_OK;

	    if (TREE_TYPE (TREE_OPERAND (cond, 1)) != void_type_node)
	      TREE_OPERAND (cond, 1) = build2 (code, void_type_node, result,
					       TREE_OPERAND (cond, 1));
	    if (TREE_TYPE (TREE_OPERAND (cond, 2)) != void_type_node)
	      TREE_OPERAND (cond, 2) = build2 (code, void_type_node,
					       unshare_expr (result),
					       TREE_OPERAND (cond, 2));

	    TREE_TYPE (cond) = void_type_node;
	    recalculate_side_effects (cond);

	    if (want_value)
	      {
		gimplify_and_add (cond, pre_p);
		*expr_p = unshare_expr (result);
	      }
	    else
	      *expr_p = cond;

	    return ret;
	  }
	else
	  ret = GS_UNHANDLED;
	break;

      case CALL_EXPR:
	/* For calls that return in memory, give *to_p as the CALL_EXPR's
	   return slot so that we don't generate a temporary.  */
	if (!CALL_EXPR_RETURN_SLOT_OPT (*from_p)
	    && aggregate_value_p (*from_p, *from_p))
	  {
	    bool use_target;

	    if (!(rhs_predicate_for (*to_p))(*from_p))
	      /* If we need a temporary, *to_p isn't accurate.  */
	      use_target = false;
	    else if (TREE_CODE (*to_p) == RESULT_DECL
		     && DECL_NAME (*to_p) == NULL_TREE
		     && needs_to_live_in_memory (*to_p))
	      /* It's OK to use the return slot directly unless it's an
		 NRV.  */
	      use_target = true;
	    else if (is_gimple_reg_type (TREE_TYPE (*to_p))
		     || (DECL_P (*to_p) && DECL_REGISTER (*to_p)))
	      /* Don't force regs into memory.  */
	      use_target = false;
	    else if (TREE_CODE (*to_p) == VAR_DECL
		     && DECL_GIMPLE_FORMAL_TEMP_P (*to_p))
	      /* Don't use the original target if it's a formal temp; we
		 don't want to take their addresses.  */
	      use_target = false;
	    else if (TREE_CODE (*expr_p) == INIT_EXPR)
	      /* It's OK to use the target directly if it's being
		 initialized.
		 */
	      use_target = true;
	    else if (!is_gimple_non_addressable (*to_p))
	      /* Don't use the original target if it's already addressable;
		 if its address escapes, and the called function uses the
		 NRV optimization, a conforming program could see *to_p
		 change before the called function returns; see c++/19317.
		 When optimizing, the return_slot pass marks more functions
		 as safe after we have escape info.  */
	      use_target = false;
	    else
	      use_target = true;

	    if (use_target)
	      {
		CALL_EXPR_RETURN_SLOT_OPT (*from_p) = 1;
		lang_hooks.mark_addressable (*to_p);
	      }
	  }

	ret = GS_UNHANDLED;
	break;

	/* If we're initializing from a container, push the initialization
	   inside it.  */
      case CLEANUP_POINT_EXPR:
      case BIND_EXPR:
      case STATEMENT_LIST:
	{
	  tree wrap = *from_p;
	  tree t;

	  ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_min_lval,
			       fb_lvalue);
	  if (ret != GS_ERROR)
	    ret = GS_OK;

	  t = voidify_wrapper_expr (wrap, *expr_p);
	  gcc_assert (t == *expr_p);

	  if (want_value)
	    {
	      gimplify_and_add (wrap, pre_p);
	      *expr_p = unshare_expr (*to_p);
	    }
	  else
	    *expr_p = wrap;
	  return GS_OK;
	}

      default:
	ret = GS_UNHANDLED;
	break;
      }

  return ret;
}

/* Promote partial stores to COMPLEX variables to total stores.  *EXPR_P is
   a MODIFY_EXPR with a lhs of a REAL/IMAGPART_EXPR of a variable with
   DECL_COMPLEX_GIMPLE_REG_P set.  */

static enum gimplify_status
gimplify_modify_expr_complex_part (tree *expr_p, tree *pre_p, bool want_value)
{
  enum tree_code code, ocode;
  tree lhs, rhs, new_rhs, other, realpart, imagpart;

  lhs = TREE_OPERAND (*expr_p, 0);
  rhs = TREE_OPERAND (*expr_p, 1);
  code = TREE_CODE (lhs);
  lhs = TREE_OPERAND (lhs, 0);

  /* Load the untouched half of the complex value into a temporary so the
     partial store can be rewritten as a store of a whole complex value.  */
  ocode = code == REALPART_EXPR ? IMAGPART_EXPR : REALPART_EXPR;
  other = build1 (ocode, TREE_TYPE (rhs), lhs);
  other = get_formal_tmp_var (other, pre_p);

  realpart = code == REALPART_EXPR ? rhs : other;
  imagpart = code == REALPART_EXPR ?
other : rhs;

  if (TREE_CONSTANT (realpart) && TREE_CONSTANT (imagpart))
    new_rhs = build_complex (TREE_TYPE (lhs), realpart, imagpart);
  else
    new_rhs = build2 (COMPLEX_EXPR, TREE_TYPE (lhs), realpart, imagpart);

  TREE_OPERAND (*expr_p, 0) = lhs;
  TREE_OPERAND (*expr_p, 1) = new_rhs;

  if (want_value)
    {
      append_to_statement_list (*expr_p, pre_p);
      *expr_p = rhs;
    }

  return GS_ALL_DONE;
}

/* Gimplify the MODIFY_EXPR node pointed to by EXPR_P.

      modify_expr
	      : varname '=' rhs
	      | '*' ID '=' rhs

    PRE_P points to the list where side effects that must happen before
	*EXPR_P should be stored.

    POST_P points to the list where side effects that must happen after
	*EXPR_P should be stored.

    WANT_VALUE is nonzero iff we want to use the value of this expression
	in another expression.  */

static enum gimplify_status
gimplify_modify_expr (tree *expr_p, tree *pre_p, tree *post_p, bool want_value)
{
  tree *from_p = &TREE_OPERAND (*expr_p, 1);
  tree *to_p = &TREE_OPERAND (*expr_p, 0);
  enum gimplify_status ret = GS_UNHANDLED;

  gcc_assert (TREE_CODE (*expr_p) == MODIFY_EXPR
	      || TREE_CODE (*expr_p) == INIT_EXPR);

  /* For zero sized types only gimplify the left hand side and right hand
     side as statements and throw away the assignment.  */
  if (zero_sized_type (TREE_TYPE (*from_p)))
    {
      gimplify_stmt (from_p);
      gimplify_stmt (to_p);
      append_to_statement_list (*from_p, pre_p);
      append_to_statement_list (*to_p, pre_p);
      *expr_p = NULL_TREE;
      return GS_ALL_DONE;
    }

  /* See if any simplifications can be done based on what the RHS is.  */
  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
				  want_value);
  if (ret != GS_UNHANDLED)
    return ret;

  /* If the value being copied is of variable width, compute the length
     of the copy into a WITH_SIZE_EXPR.  Note that we need to do this
     before gimplifying any of the operands so that we can resolve any
     PLACEHOLDER_EXPRs in the size.  Also note that the RTL expander uses
     the size of the expression to be copied, not of the destination, so
     that is what we must do here.
     */
  maybe_with_size_expr (from_p);

  ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
  if (ret == GS_ERROR)
    return ret;

  ret = gimplify_expr (from_p, pre_p, post_p,
		       rhs_predicate_for (*to_p), fb_rvalue);
  if (ret == GS_ERROR)
    return ret;

  /* Now see if the above changed *from_p to something we handle specially.  */
  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
				  want_value);
  if (ret != GS_UNHANDLED)
    return ret;

  /* If we've got a variable sized assignment between two lvalues (i.e. does
     not involve a call), then we can make things a bit more straightforward
     by converting the assignment to memcpy or memset.  */
  if (TREE_CODE (*from_p) == WITH_SIZE_EXPR)
    {
      tree from = TREE_OPERAND (*from_p, 0);
      tree size = TREE_OPERAND (*from_p, 1);

      if (TREE_CODE (from) == CONSTRUCTOR)
	return gimplify_modify_expr_to_memset (expr_p, size, want_value);
      if (is_gimple_addressable (from))
	{
	  *from_p = from;
	  return gimplify_modify_expr_to_memcpy (expr_p, size, want_value);
	}
    }

  /* Transform partial stores to non-addressable complex variables into
     total stores.  This allows us to use real instead of virtual operands
     for these variables, which improves optimization.  */
  if ((TREE_CODE (*to_p) == REALPART_EXPR
       || TREE_CODE (*to_p) == IMAGPART_EXPR)
      && is_gimple_reg (TREE_OPERAND (*to_p, 0)))
    return gimplify_modify_expr_complex_part (expr_p, pre_p, want_value);

  if (gimplify_ctxp->into_ssa && is_gimple_reg (*to_p))
    {
      /* If we've somehow already got an SSA_NAME on the LHS, then
	 we've probably modified it twice.  Not good.  */
      gcc_assert (TREE_CODE (*to_p) != SSA_NAME);
      *to_p = make_ssa_name (*to_p, *expr_p);
    }

  if (want_value)
    {
      append_to_statement_list (*expr_p, pre_p);
      *expr_p = *to_p;
      return GS_OK;
    }

  return GS_ALL_DONE;
}

/* Gimplify a comparison between two variable-sized objects.  Do this
   with a call to BUILT_IN_MEMCMP.
*/

static enum gimplify_status
gimplify_variable_sized_compare (tree *expr_p)
{
  tree op0 = TREE_OPERAND (*expr_p, 0);
  tree op1 = TREE_OPERAND (*expr_p, 1);
  tree args, t, dest;

  /* The size may reference fields of the object itself through a
     PLACEHOLDER_EXPR; substitute OP0 so the memcmp length stands alone.  */
  t = TYPE_SIZE_UNIT (TREE_TYPE (op0));
  t = unshare_expr (t);
  t = SUBSTITUTE_PLACEHOLDER_IN_EXPR (t, op0);
  args = tree_cons (NULL, t, NULL);
  t = build_fold_addr_expr (op1);
  args = tree_cons (NULL, t, args);
  dest = build_fold_addr_expr (op0);
  args = tree_cons (NULL, dest, args);
  t = implicit_built_in_decls[BUILT_IN_MEMCMP];
  t = build_function_call_expr (t, args);
  /* Rewrite as <cmp-code> (memcmp (&op0, &op1, size), 0), preserving the
     original comparison code and type.  */
  *expr_p
    = build2 (TREE_CODE (*expr_p), TREE_TYPE (*expr_p), t, integer_zero_node);

  return GS_OK;
}

/* Gimplify a comparison between two aggregate objects of integral scalar
   mode as a comparison between the bitwise equivalent scalar values.  */

static enum gimplify_status
gimplify_scalar_mode_aggregate_compare (tree *expr_p)
{
  tree op0 = TREE_OPERAND (*expr_p, 0);
  tree op1 = TREE_OPERAND (*expr_p, 1);

  tree type = TREE_TYPE (op0);
  tree scalar_type = lang_hooks.types.type_for_mode (TYPE_MODE (type), 1);

  op0 = fold_build1 (VIEW_CONVERT_EXPR, scalar_type, op0);
  op1 = fold_build1 (VIEW_CONVERT_EXPR, scalar_type, op1);

  *expr_p
    = fold_build2 (TREE_CODE (*expr_p), TREE_TYPE (*expr_p), op0, op1);

  return GS_OK;
}

/* Gimplify TRUTH_ANDIF_EXPR and TRUTH_ORIF_EXPR expressions.  EXPR_P
   points to the expression to gimplify.

   Expressions of the form 'a && b' are gimplified to:

	a && b ? true : false

   gimplify_cond_expr will do the rest.

   PRE_P points to the list where side effects that must happen before
	*EXPR_P should be stored.  */

static enum gimplify_status
gimplify_boolean_expr (tree *expr_p)
{
  /* Preserve the original type of the expression.  */
  tree type = TREE_TYPE (*expr_p);

  *expr_p = build3 (COND_EXPR, type, *expr_p,
		    fold_convert (type, boolean_true_node),
		    fold_convert (type, boolean_false_node));

  return GS_OK;
}

/* Gimplifies an expression sequence.  This function gimplifies each
   expression and re-writes the original expression with the last
   expression of the sequence in GIMPLE form.

   PRE_P points to the list where the side effects for all the expressions
	in the sequence will be emitted.

   WANT_VALUE is true when the result of the last COMPOUND_EXPR is used.  */
/* ??? Should rearrange to share the pre-queue with all the indirect
   invocations of gimplify_expr.  Would probably save on creations of
   statement_list nodes.  */

static enum gimplify_status
gimplify_compound_expr (tree *expr_p, tree *pre_p, bool want_value)
{
  tree t = *expr_p;

  do
    {
      tree *sub_p = &TREE_OPERAND (t, 0);

      if (TREE_CODE (*sub_p) == COMPOUND_EXPR)
	gimplify_compound_expr (sub_p, pre_p, false);
      else
	gimplify_stmt (sub_p);
      append_to_statement_list (*sub_p, pre_p);

      t = TREE_OPERAND (t, 1);
    }
  while (TREE_CODE (t) == COMPOUND_EXPR);

  *expr_p = t;
  if (want_value)
    return GS_OK;
  else
    {
      gimplify_stmt (expr_p);
      return GS_ALL_DONE;
    }
}

/* Gimplifies a statement list.  These may be created either by an
   enlightened front-end, or by shortcut_cond_expr.  */

static enum gimplify_status
gimplify_statement_list (tree *expr_p, tree *pre_p)
{
  tree temp = voidify_wrapper_expr (*expr_p, NULL);

  tree_stmt_iterator i = tsi_start (*expr_p);

  while (!tsi_end_p (i))
    {
      tree t;

      gimplify_stmt (tsi_stmt_ptr (i));

      t = tsi_stmt (i);
      if (t == NULL)
	tsi_delink (&i);
      else if (TREE_CODE (t) == STATEMENT_LIST)
	{
	  /* Flatten nested statement lists in place.  */
	  tsi_link_before (&i, t, TSI_SAME_STMT);
	  tsi_delink (&i);
	}
      else
	tsi_next (&i);
    }

  if (temp)
    {
      append_to_statement_list (*expr_p, pre_p);
      *expr_p = temp;
      return GS_OK;
    }

  return GS_ALL_DONE;
}

/* Gimplify a SAVE_EXPR node.  EXPR_P points to the expression to
   gimplify.  After gimplification, EXPR_P will point to a new temporary
   that holds the original value of the SAVE_EXPR node.

   PRE_P points to the list where side effects that must happen before
	*EXPR_P should be stored.
*/

static enum gimplify_status
gimplify_save_expr (tree *expr_p, tree *pre_p, tree *post_p)
{
  enum gimplify_status ret = GS_ALL_DONE;
  tree val;

  gcc_assert (TREE_CODE (*expr_p) == SAVE_EXPR);
  val = TREE_OPERAND (*expr_p, 0);

  /* If the SAVE_EXPR has not been resolved, then evaluate it once.  */
  if (!SAVE_EXPR_RESOLVED_P (*expr_p))
    {
      /* The operand may be a void-valued expression such as SAVE_EXPRs
	 generated by the Java frontend for class initialization.  It is
	 being executed only for its side-effects.  */
      if (TREE_TYPE (val) == void_type_node)
	{
	  ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
			       is_gimple_stmt, fb_none);
	  append_to_statement_list (TREE_OPERAND (*expr_p, 0), pre_p);
	  val = NULL;
	}
      else
	val = get_initialized_tmp_var (val, pre_p, post_p);

      TREE_OPERAND (*expr_p, 0) = val;
      SAVE_EXPR_RESOLVED_P (*expr_p) = 1;
    }

  *expr_p = val;

  return ret;
}

/* Re-write the ADDR_EXPR node pointed to by EXPR_P

      unary_expr
	      : ...
	      | '&' varname
	      ...

    PRE_P points to the list where side effects that must happen before
	*EXPR_P should be stored.

    POST_P points to the list where side effects that must happen after
	*EXPR_P should be stored.  */

static enum gimplify_status
gimplify_addr_expr (tree *expr_p, tree *pre_p, tree *post_p)
{
  tree expr = *expr_p;
  tree op0 = TREE_OPERAND (expr, 0);
  enum gimplify_status ret;

  switch (TREE_CODE (op0))
    {
    case INDIRECT_REF:
    case MISALIGNED_INDIRECT_REF:
    do_indirect_ref:
      /* Check if we are dealing with an expression of the form '&*ptr'.
	 While the front end folds away '&*ptr' into 'ptr', these
	 expressions may be generated internally by the compiler (e.g.,
	 builtins like __builtin_va_end).  */
      /* Caution: the silent array decomposition semantics we allow for
	 ADDR_EXPR means we can't always discard the pair.  */
      /* Gimplification of the ADDR_EXPR operand may drop
	 cv-qualification conversions, so make sure we add them if
	 needed.
	 */
      {
	tree op00 = TREE_OPERAND (op0, 0);
	tree t_expr = TREE_TYPE (expr);
	tree t_op00 = TREE_TYPE (op00);

	if (!lang_hooks.types_compatible_p (t_expr, t_op00))
	  {
#ifdef ENABLE_CHECKING
	    tree t_op0 = TREE_TYPE (op0);
	    gcc_assert (POINTER_TYPE_P (t_expr)
			&& cpt_same_type (TREE_CODE (t_op0) == ARRAY_TYPE
					  ? TREE_TYPE (t_op0) : t_op0,
					  TREE_TYPE (t_expr))
			&& POINTER_TYPE_P (t_op00)
			&& cpt_same_type (t_op0, TREE_TYPE (t_op00)));
#endif
	    op00 = fold_convert (TREE_TYPE (expr), op00);
	  }
	*expr_p = op00;
	ret = GS_OK;
      }
      break;

    case VIEW_CONVERT_EXPR:
      /* Take the address of our operand and then convert it to the type of
	 this ADDR_EXPR.

	 ??? The interactions of VIEW_CONVERT_EXPR and aliasing is not at
	 all clear.  The impact of this transformation is even less clear.  */

      /* If the operand is a useless conversion, look through it.  Doing so
	 guarantees that the ADDR_EXPR and its operand will remain of the
	 same type.  */
      if (tree_ssa_useless_type_conversion (TREE_OPERAND (op0, 0)))
	op0 = TREE_OPERAND (op0, 0);

      *expr_p = fold_convert (TREE_TYPE (expr),
			      build_fold_addr_expr (TREE_OPERAND (op0, 0)));
      ret = GS_OK;
      break;

    default:
      /* We use fb_either here because the C frontend sometimes takes
	 the address of a call that returns a struct; see
	 gcc.dg/c99-array-lval-1.c.  The gimplifier will correctly make
	 the implied temporary explicit.  */
      ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, post_p,
			   is_gimple_addressable, fb_either);
      if (ret != GS_ERROR)
	{
	  op0 = TREE_OPERAND (expr, 0);

	  /* For various reasons, the gimplification of the expression
	     may have made a new INDIRECT_REF.  */
	  if (TREE_CODE (op0) == INDIRECT_REF)
	    goto do_indirect_ref;

	  /* Make sure TREE_INVARIANT, TREE_CONSTANT, and TREE_SIDE_EFFECTS
	     is set properly.  */
	  recompute_tree_invariant_for_addr_expr (expr);

	  /* Mark the RHS addressable.  */
	  lang_hooks.mark_addressable (TREE_OPERAND (expr, 0));
	}
      break;
    }

  return ret;
}

/* Gimplify the operands of an ASM_EXPR.  Input operands should be a gimple
   value; output operands should be a gimple lvalue.
*/

static enum gimplify_status
gimplify_asm_expr (tree *expr_p, tree *pre_p, tree *post_p)
{
  tree expr = *expr_p;
  int noutputs = list_length (ASM_OUTPUTS (expr));
  const char **oconstraints
    = (const char **) alloca ((noutputs) * sizeof (const char *));
  int i;
  tree link;
  const char *constraint;
  bool allows_mem, allows_reg, is_inout;
  enum gimplify_status ret, tret;

  ret = GS_ALL_DONE;
  for (i = 0, link = ASM_OUTPUTS (expr); link; ++i, link = TREE_CHAIN (link))
    {
      size_t constraint_len;
      oconstraints[i] = constraint
	= TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
      constraint_len = strlen (constraint);
      if (constraint_len == 0)
	continue;

      parse_output_constraint (&constraint, i, 0, 0,
			       &allows_mem, &allows_reg, &is_inout);

      /* A memory-only output operand must have its address taken.  */
      if (!allows_reg && allows_mem)
	lang_hooks.mark_addressable (TREE_VALUE (link));

      tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
			    is_inout ? is_gimple_min_lval : is_gimple_lvalue,
			    fb_lvalue | fb_mayfail);
      if (tret == GS_ERROR)
	{
	  error ("invalid lvalue in asm output %d", i);
	  ret = tret;
	}

      if (is_inout)
	{
	  /* An input/output operand.  To give the optimizers more
	     flexibility, split it into separate input and output
	     operands.  */
	  tree input;
	  char buf[10];

	  /* Turn the in/out constraint into an output constraint.  */
	  char *p = xstrdup (constraint);
	  p[0] = '=';
	  TREE_VALUE (TREE_PURPOSE (link)) = build_string (constraint_len, p);

	  /* And add a matching input constraint.  */
	  if (allows_reg)
	    {
	      sprintf (buf, "%d", i);

	      /* If there are multiple alternatives in the constraint,
		 handle each of them individually.  Those that allow register
		 will be replaced with operand number, the others will stay
		 unchanged.  */
	      if (strchr (p, ',') != NULL)
		{
		  size_t len = 0, buflen = strlen (buf);
		  char *beg, *end, *str, *dst;

		  /* First pass: compute the length of the rewritten
		     constraint string.  */
		  for (beg = p + 1;;)
		    {
		      end = strchr (beg, ',');
		      if (end == NULL)
			end = strchr (beg, '\0');
		      if ((size_t) (end - beg) < buflen)
			len += buflen + 1;
		      else
			len += end - beg + 1;
		      if (*end)
			beg = end + 1;
		      else
			break;
		    }

		  /* Second pass: rewrite each alternative, substituting
		     the operand number for register alternatives.  */
		  str = (char *) alloca (len);
		  for (beg = p + 1, dst = str;;)
		    {
		      const char *tem;
		      bool mem_p, reg_p, inout_p;

		      end = strchr (beg, ',');
		      if (end)
			*end = '\0';
		      beg[-1] = '=';
		      tem = beg - 1;
		      parse_output_constraint (&tem, i, 0, 0,
					       &mem_p, &reg_p, &inout_p);

		      if (dst != str)
			*dst++ = ',';
		      if (reg_p)
			{
			  memcpy (dst, buf, buflen);
			  dst += buflen;
			}
		      else
			{
			  if (end)
			    len = end - beg;
			  else
			    len = strlen (beg);
			  memcpy (dst, beg, len);
			  dst += len;
			}
		      if (end)
			beg = end + 1;
		      else
			break;
		    }
		  *dst = '\0';
		  input = build_string (dst - str, str);
		}
	      else
		input = build_string (strlen (buf), buf);
	    }
	  else
	    input = build_string (constraint_len - 1, constraint + 1);
	  free (p);
	  input = build_tree_list (build_tree_list (NULL_TREE, input),
				   unshare_expr (TREE_VALUE (link)));
	  ASM_INPUTS (expr) = chainon (ASM_INPUTS (expr), input);
	}
    }

  for (link = ASM_INPUTS (expr); link; ++i, link = TREE_CHAIN (link))
    {
      constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
      parse_input_constraint (&constraint, 0, 0, noutputs, 0,
			      oconstraints, &allows_mem, &allows_reg);

      /* If the operand is a memory input, it should be an lvalue.  */
      if (!allows_reg && allows_mem)
	{
	  tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
				is_gimple_lvalue, fb_lvalue | fb_mayfail);
	  lang_hooks.mark_addressable (TREE_VALUE (link));
	  if (tret == GS_ERROR)
	    {
	      error ("memory input %d is not directly addressable", i);
	      ret = tret;
	    }
	}
      else
	{
	  tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
				is_gimple_asm_val, fb_rvalue);
	  if (tret == GS_ERROR)
	    ret = tret;
	}
    }

  return ret;
}

/* Gimplify a CLEANUP_POINT_EXPR.
   Currently this works by adding WITH_CLEANUP_EXPRs to the prequeue as we
   encounter cleanups while gimplifying the body, and converting them to
   TRY_FINALLY_EXPRs when we return to this function.

   FIXME should we complexify the prequeue handling instead?  Or use flags
   for all the cleanups and let the optimizer tighten them up?  The current
   code seems pretty fragile; it will break on a cleanup within any
   non-conditional nesting.  But any such nesting would be broken, anyway;
   we can't write a TRY_FINALLY_EXPR that starts inside a nesting construct
   and continues out of it.  We can do that at the RTL level, though, so
   having an optimizer to tighten up try/finally regions would be a Good
   Thing.  */

static enum gimplify_status
gimplify_cleanup_point_expr (tree *expr_p, tree *pre_p)
{
  tree_stmt_iterator iter;
  tree body;

  tree temp = voidify_wrapper_expr (*expr_p, NULL);

  /* We only care about the number of conditions between the innermost
     CLEANUP_POINT_EXPR and the cleanup.  So save and reset the count and
     any cleanups collected outside the CLEANUP_POINT_EXPR.  */
  int old_conds = gimplify_ctxp->conditions;
  tree old_cleanups = gimplify_ctxp->conditional_cleanups;
  gimplify_ctxp->conditions = 0;
  gimplify_ctxp->conditional_cleanups = NULL_TREE;

  body = TREE_OPERAND (*expr_p, 0);
  gimplify_to_stmt_list (&body);

  gimplify_ctxp->conditions = old_conds;
  gimplify_ctxp->conditional_cleanups = old_cleanups;

  /* Convert each WITH_CLEANUP_EXPR marker into a TRY_FINALLY_EXPR (or
     TRY_CATCH_EXPR for EH-only cleanups) wrapping the statements that
     follow it.  */
  for (iter = tsi_start (body); !tsi_end_p (iter); )
    {
      tree *wce_p = tsi_stmt_ptr (iter);
      tree wce = *wce_p;

      if (TREE_CODE (wce) == WITH_CLEANUP_EXPR)
	{
	  if (tsi_one_before_end_p (iter))
	    {
	      /* Cleanup marker is last: nothing is protected by it, so
		 just emit the cleanup action in place.  */
	      tsi_link_before (&iter, TREE_OPERAND (wce, 0), TSI_SAME_STMT);
	      tsi_delink (&iter);
	      break;
	    }
	  else
	    {
	      tree sl, tfe;
	      enum tree_code code;

	      if (CLEANUP_EH_ONLY (wce))
		code = TRY_CATCH_EXPR;
	      else
		code = TRY_FINALLY_EXPR;

	      sl = tsi_split_statement_list_after (&iter);
	      tfe = build2 (code, void_type_node, sl, NULL_TREE);
	      append_to_statement_list (TREE_OPERAND (wce, 0),
					&TREE_OPERAND (tfe, 1));
	      *wce_p = tfe;
	      /* Continue scanning inside the protected list for nested
		 cleanup markers.  */
	      iter = tsi_start (sl);
	    }
	}
      else
	tsi_next (&iter);
    }

  if (temp)
    {
      *expr_p = temp;
      append_to_statement_list (body, pre_p);
      return GS_OK;
    }
  else
    {
      *expr_p = body;
      return GS_ALL_DONE;
    }
}

/* Insert a cleanup marker for gimplify_cleanup_point_expr.  CLEANUP
   is the cleanup action required.  */

static void
gimple_push_cleanup (tree var, tree cleanup, bool eh_only, tree *pre_p)
{
  tree wce;

  /* Errors can result in improperly nested cleanups.  Which results in
     confusion when trying to resolve the WITH_CLEANUP_EXPR.  */
  if (errorcount || sorrycount)
    return;

  if (gimple_conditional_context ())
    {
      /* If we're in a conditional context, this is more complex.  We only
	 want to run the cleanup if we actually ran the initialization that
	 necessitates it, but we want to run it after the end of the
	 conditional context.  So we wrap the try/finally around the
	 condition and use a flag to determine whether or not to actually
	 run the destructor.  Thus

	   test ? f(A()) : 0

	 becomes (approximately)

	   flag = 0;
	   try {
	     if (test) { A::A(temp); flag = 1; val = f(temp); }
	     else { val = 0; }
	   } finally {
	     if (flag) A::~A(temp);
	   }
	   val
      */

      tree flag = create_tmp_var (boolean_type_node, "cleanup");
      tree ffalse = build2 (MODIFY_EXPR, void_type_node, flag,
			    boolean_false_node);
      tree ftrue = build2 (MODIFY_EXPR, void_type_node, flag,
			   boolean_true_node);
      cleanup = build3 (COND_EXPR, void_type_node, flag, cleanup, NULL);
      wce = build1 (WITH_CLEANUP_EXPR, void_type_node, cleanup);
      append_to_statement_list (ffalse, &gimplify_ctxp->conditional_cleanups);
      append_to_statement_list (wce, &gimplify_ctxp->conditional_cleanups);
      append_to_statement_list (ftrue, pre_p);

      /* Because of this manipulation, and the EH edges that jump
	 threading cannot redirect, the temporary (VAR) will appear
	 to be used uninitialized.  Don't warn.  */
      TREE_NO_WARNING (var) = 1;
    }
  else
    {
      wce = build1 (WITH_CLEANUP_EXPR, void_type_node, cleanup);
      CLEANUP_EH_ONLY (wce) = eh_only;
      append_to_statement_list (wce, pre_p);
    }

  gimplify_stmt (&TREE_OPERAND (wce, 0));
}

/* Gimplify a TARGET_EXPR which doesn't appear on the rhs of an INIT_EXPR.  */

static enum gimplify_status
gimplify_target_expr (tree *expr_p, tree *pre_p, tree *post_p)
{
  tree targ = *expr_p;
  tree temp = TARGET_EXPR_SLOT (targ);
  tree init = TARGET_EXPR_INITIAL (targ);
  enum gimplify_status ret;

  if (init)
    {
      /* TARGET_EXPR temps aren't part of the enclosing block, so add it
	 to the temps list.  */
      gimple_add_tmp_var (temp);

      /* If TARGET_EXPR_INITIAL is void, then the mere evaluation of the
	 expression is supposed to initialize the slot.  */
      if (VOID_TYPE_P (TREE_TYPE (init)))
	ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none);
      else
	{
	  init = build2 (INIT_EXPR, void_type_node, temp, init);
	  ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt,
			       fb_none);
	}
      if (ret == GS_ERROR)
	{
	  /* PR c++/28266 Make sure this is expanded only once.  */
	  TARGET_EXPR_INITIAL (targ) = NULL_TREE;
	  return GS_ERROR;
	}
      append_to_statement_list (init, pre_p);

      /* If needed, push the cleanup for the temp.  */
      if (TARGET_EXPR_CLEANUP (targ))
	{
	  gimplify_stmt (&TARGET_EXPR_CLEANUP (targ));
	  gimple_push_cleanup (temp, TARGET_EXPR_CLEANUP (targ),
			       CLEANUP_EH_ONLY (targ), pre_p);
#ifdef KEY /* bug 10962 */
	  /* Open64 spin-file path: flag the cleanup so the gspin
	     translator emits it.  */
	  if (flag_spin_file)
	  {
	    EMIT_TARGET_EXPR_CLEANUP(targ) = 1;
	    if (gspin_invoked (targ))
	      gs_set_flag_value (targ, GS_EMIT_TARGET_EXPR_CLEANUP, 1);
	  }
#endif
	}

      /* Only expand this once.  */
      TREE_OPERAND (targ, 3) = init;
      TARGET_EXPR_INITIAL (targ) = NULL_TREE;
    }
  else
    /* We should have expanded this before.  */
    gcc_assert (DECL_SEEN_IN_BIND_EXPR_P (temp));

  *expr_p = temp;
  return GS_OK;
}

/* Gimplification of expression trees.  */

/* Gimplify an expression which appears at statement context; usually, this
   means replacing it with a suitably gimple STATEMENT_LIST.  */

void
gimplify_stmt (tree *stmt_p)
{
  gimplify_expr (stmt_p, NULL, NULL, is_gimple_stmt, fb_none);
}

/* Similarly, but force the result to be a STATEMENT_LIST.  */

void
gimplify_to_stmt_list (tree *stmt_p)
{
  gimplify_stmt (stmt_p);
  if (!*stmt_p)
    *stmt_p = alloc_stmt_list ();
  else if (TREE_CODE (*stmt_p) != STATEMENT_LIST)
    {
      tree t = *stmt_p;
      *stmt_p = alloc_stmt_list ();
      append_to_statement_list (t, stmt_p);
    }
}

/* Add FIRSTPRIVATE entries for DECL in the OpenMP the surrounding parallels
   to CTX.  If entries already exist, force them to be some flavor of
   private.  If there is no enclosing parallel, do nothing.
*/ void omp_firstprivatize_variable (struct gimplify_omp_ctx *ctx, tree decl) { splay_tree_node n; if (decl == NULL || !DECL_P (decl)) return; do { n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl); if (n != NULL) { if (n->value & GOVD_SHARED) n->value = GOVD_FIRSTPRIVATE | (n->value & GOVD_SEEN); else return; } else if (ctx->region_type != ORT_WORKSHARE) omp_add_variable (ctx, decl, GOVD_FIRSTPRIVATE); ctx = ctx->outer_context; } while (ctx); } /* Similarly for each of the type sizes of TYPE. */ static void omp_firstprivatize_type_sizes (struct gimplify_omp_ctx *ctx, tree type) { if (type == NULL || type == error_mark_node) return; type = TYPE_MAIN_VARIANT (type); if (pointer_set_insert (ctx->privatized_types, type)) return; switch (TREE_CODE (type)) { case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: case REAL_TYPE: omp_firstprivatize_variable (ctx, TYPE_MIN_VALUE (type)); omp_firstprivatize_variable (ctx, TYPE_MAX_VALUE (type)); break; case ARRAY_TYPE: omp_firstprivatize_type_sizes (ctx, TREE_TYPE (type)); omp_firstprivatize_type_sizes (ctx, TYPE_DOMAIN (type)); break; case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: { tree field; for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) if (TREE_CODE (field) == FIELD_DECL) { omp_firstprivatize_variable (ctx, DECL_FIELD_OFFSET (field)); omp_firstprivatize_type_sizes (ctx, TREE_TYPE (field)); } } break; case POINTER_TYPE: case REFERENCE_TYPE: omp_firstprivatize_type_sizes (ctx, TREE_TYPE (type)); break; default: break; } omp_firstprivatize_variable (ctx, TYPE_SIZE (type)); omp_firstprivatize_variable (ctx, TYPE_SIZE_UNIT (type)); lang_hooks.types.omp_firstprivatize_type_sizes (ctx, type); } /* Add an entry for DECL in the OpenMP context CTX with FLAGS. 
 */

static void
omp_add_variable (struct gimplify_omp_ctx *ctx, tree decl, unsigned int flags)
{
  splay_tree_node n;
  unsigned int nflags;
  tree t;

  if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
    return;

  /* Never elide decls whose type has TREE_ADDRESSABLE set.  This means
     there are constructors involved somewhere.  */
  if (TREE_ADDRESSABLE (TREE_TYPE (decl))
      || TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (decl)))
    flags |= GOVD_SEEN;

  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n != NULL)
    {
      /* We shouldn't be re-adding the decl with the same data
	 sharing class.  */
      gcc_assert ((n->value & GOVD_DATA_SHARE_CLASS & flags) == 0);
      /* The only combination of data sharing classes we should see is
	 FIRSTPRIVATE and LASTPRIVATE.  */
      nflags = n->value | flags;
      gcc_assert ((nflags & GOVD_DATA_SHARE_CLASS)
		  == (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE));
      n->value = nflags;
      return;
    }

  /* When adding a variable-sized variable, we have to handle all sorts
     of additional bits of data: the pointer replacement variable, and
     the parameters of the type.  */
  if (DECL_SIZE (decl) && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    {
      /* Add the pointer replacement variable as PRIVATE if the variable
	 replacement is private, else FIRSTPRIVATE since we'll need the
	 address of the original variable either for SHARED, or for the
	 copy into or out of the context.  */
      if (!(flags & GOVD_LOCAL))
	{
	  nflags = flags & GOVD_PRIVATE ? GOVD_PRIVATE : GOVD_FIRSTPRIVATE;
	  nflags |= flags & GOVD_SEEN;
	  t = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (t) == INDIRECT_REF);
	  t = TREE_OPERAND (t, 0);
	  gcc_assert (DECL_P (t));
	  omp_add_variable (ctx, t, nflags);
	}

      /* Add all of the variable and type parameters (which should have
	 been gimplified to a formal temporary) as FIRSTPRIVATE.  */
      omp_firstprivatize_variable (ctx, DECL_SIZE_UNIT (decl));
      omp_firstprivatize_variable (ctx, DECL_SIZE (decl));
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (decl));

      /* The variable-sized variable itself is never SHARED, only some form
	 of PRIVATE.  The sharing would take place via the pointer variable
	 which we remapped above.  */
      if (flags & GOVD_SHARED)
	flags = GOVD_PRIVATE | GOVD_DEBUG_PRIVATE
		| (flags & (GOVD_SEEN | GOVD_EXPLICIT));

      /* We're going to make use of the TYPE_SIZE_UNIT at least in the
	 alloca statement we generate for the variable, so make sure it
	 is available.  This isn't automatically needed for the SHARED
	 case, since we won't be allocating local storage then.
	 For local variables TYPE_SIZE_UNIT might not be gimplified yet,
	 in this case omp_notice_variable will be called later
	 on when it is gimplified.  */
      else if (! (flags & GOVD_LOCAL))
	omp_notice_variable (ctx, TYPE_SIZE_UNIT (TREE_TYPE (decl)), true);
    }
  else if (lang_hooks.decls.omp_privatize_by_reference (decl))
    {
      gcc_assert ((flags & GOVD_LOCAL) == 0);
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (decl));

      /* Similar to the direct variable sized case above, we'll need the
	 size of references being privatized.  */
      if ((flags & GOVD_SHARED) == 0)
	{
	  t = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)));
	  if (TREE_CODE (t) != INTEGER_CST)
	    omp_notice_variable (ctx, t, true);
	}
    }

  splay_tree_insert (ctx->variables, (splay_tree_key)decl, flags);
}

/* Record the fact that DECL was used within the OpenMP context CTX.
   IN_CODE is true when real code uses DECL, and false when we should
   merely emit default(none) errors.  Return true if DECL is going to
   be remapped and thus DECL shouldn't be gimplified into its
   DECL_VALUE_EXPR (if any).  */

static bool
omp_notice_variable (struct gimplify_omp_ctx *ctx, tree decl, bool in_code)
{
  splay_tree_node n;
  unsigned flags = in_code ? GOVD_SEEN : 0;
  bool ret = false, shared;

  if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
    return false;

  /* Threadprivate variables are predetermined.  */
  if (is_global_var (decl))
    {
      if (DECL_THREAD_LOCAL_P (decl))
	return false;

      if (DECL_HAS_VALUE_EXPR_P (decl))
	{
	  tree value = get_base_address (DECL_VALUE_EXPR (decl));

	  if (value && DECL_P (value) && DECL_THREAD_LOCAL_P (value))
	    return false;
	}
    }

  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n == NULL)
    {
      enum omp_clause_default_kind default_kind, kind;
      struct gimplify_omp_ctx *octx;

      if (ctx->region_type == ORT_WORKSHARE)
	goto do_outer;

      /* ??? Some compiler-generated variables (like SAVE_EXPRs) could be
	 remapped firstprivate instead of shared.  To some extent this is
	 addressed in omp_firstprivatize_type_sizes, but not
	 effectively.  */
      default_kind = ctx->default_kind;
      kind = lang_hooks.decls.omp_predetermined_sharing (decl);
      if (kind != OMP_CLAUSE_DEFAULT_UNSPECIFIED)
	default_kind = kind;

      switch (default_kind)
	{
	case OMP_CLAUSE_DEFAULT_NONE:
	  error ("%qs not specified in enclosing parallel",
		 IDENTIFIER_POINTER (DECL_NAME (decl)));
	  error ("%Henclosing parallel", &ctx->location);
	  /* FALLTHRU */
	case OMP_CLAUSE_DEFAULT_SHARED:
	  flags |= GOVD_SHARED;
	  break;
	case OMP_CLAUSE_DEFAULT_PRIVATE:
	  flags |= GOVD_PRIVATE;
	  break;
	case OMP_CLAUSE_DEFAULT_FIRSTPRIVATE:
	  flags |= GOVD_FIRSTPRIVATE;
	  break;
	case OMP_CLAUSE_DEFAULT_UNSPECIFIED:
	  /* decl will be either GOVD_FIRSTPRIVATE or GOVD_SHARED.  */
	  gcc_assert (ctx->region_type == ORT_TASK);
	  if (ctx->outer_context)
	    omp_notice_variable (ctx->outer_context, decl, in_code);
	  /* A task's implicit attribute depends on how the variable is
	     handled in the enclosing contexts up to the nearest
	     parallel.  */
	  for (octx = ctx->outer_context; octx; octx = octx->outer_context)
	    {
	      splay_tree_node n2;

	      n2 = splay_tree_lookup (octx->variables, (splay_tree_key) decl);
	      if (n2 && (n2->value & GOVD_DATA_SHARE_CLASS) != GOVD_SHARED)
		{
		  flags |= GOVD_FIRSTPRIVATE;
		  break;
		}
	      if ((octx->region_type & ORT_PARALLEL) != 0)
		break;
	    }
	  if (flags & GOVD_FIRSTPRIVATE)
	    break;
	  if (octx == NULL
	      && (TREE_CODE (decl) == PARM_DECL
		  || (!is_global_var (decl)
		      && DECL_CONTEXT (decl) == current_function_decl)))
	    {
	      flags |= GOVD_FIRSTPRIVATE;
	      break;
	    }
	  flags |= GOVD_SHARED;
	  break;
	default:
	  gcc_unreachable ();
	}

      if ((flags & GOVD_PRIVATE)
	  && lang_hooks.decls.omp_private_outer_ref (decl))
	flags |= GOVD_PRIVATE_OUTER_REF;

      omp_add_variable (ctx, decl, flags);

      shared = (flags & GOVD_SHARED) != 0;
      ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);
      goto do_outer;
    }

  shared = ((flags | n->value) & GOVD_SHARED) != 0;
  ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);

  /* If nothing changed, there's nothing left to do.  */
  if ((n->value & flags) == flags)
    return ret;
  flags |= n->value;
  n->value = flags;

 do_outer:
  /* If the variable is private in the current context, then we don't
     need to propagate anything to an outer context.  */
  if ((flags & GOVD_PRIVATE) && !(flags & GOVD_PRIVATE_OUTER_REF))
    return ret;
  if (ctx->outer_context
      && omp_notice_variable (ctx->outer_context, decl, in_code))
    return true;
  return ret;
}

/* Verify that DECL is private within CTX.  If there's specific information
   to the contrary in the innermost scope, generate an error.
*/ static bool omp_is_private (struct gimplify_omp_ctx *ctx, tree decl) { splay_tree_node n; n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl); if (n != NULL) { if (n->value & GOVD_SHARED) { if (ctx == gimplify_omp_ctxp) { error ("iteration variable %qs should be private", IDENTIFIER_POINTER (DECL_NAME (decl))); n->value = GOVD_PRIVATE; return true; } else return false; } else if ((n->value & GOVD_EXPLICIT) != 0 && (ctx == gimplify_omp_ctxp || (ctx->region_type == ORT_COMBINED_PARALLEL && gimplify_omp_ctxp->outer_context == ctx))) { if ((n->value & GOVD_FIRSTPRIVATE) != 0) error ("iteration variable %qs should not be firstprivate", IDENTIFIER_POINTER (DECL_NAME (decl))); else if ((n->value & GOVD_REDUCTION) != 0) error ("iteration variable %qs should not be reduction", IDENTIFIER_POINTER (DECL_NAME (decl))); } return true; } if (ctx->region_type != ORT_WORKSHARE) return false; else if (ctx->outer_context) return omp_is_private (ctx->outer_context, decl); else return !is_global_var (decl); } /* Return true if DECL is private within a parallel region that binds to the current construct's context or in parallel region's REDUCTION clause. */ static bool omp_check_private (struct gimplify_omp_ctx *ctx, tree decl) { splay_tree_node n; do { ctx = ctx->outer_context; if (ctx == NULL) return !(is_global_var (decl) /* References might be private, but might be shared too. */ || lang_hooks.decls.omp_privatize_by_reference (decl)); n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl); if (n != NULL) return (n->value & GOVD_SHARED) == 0; } while (ctx->region_type == ORT_WORKSHARE); return false; } /* Scan the OpenMP clauses in *LIST_P, installing mappings into a new and previous omp contexts. 
 */

static void
gimplify_scan_omp_clauses (tree *list_p, tree *pre_p,
			   enum omp_region_type region_type)
{
  struct gimplify_omp_ctx *ctx, *outer_ctx;
  tree c;

  ctx = new_omp_context (region_type);
  outer_ctx = ctx->outer_context;

  while ((c = *list_p) != NULL)
    {
      enum gimplify_status gs;
      bool remove = false;
      bool notice_outer = true;
      const char *check_non_private = NULL;
      unsigned int flags;
      tree decl;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  flags = GOVD_PRIVATE | GOVD_EXPLICIT;
	  if (lang_hooks.decls.omp_private_outer_ref (OMP_CLAUSE_DECL (c)))
	    {
	      flags |= GOVD_PRIVATE_OUTER_REF;
	      OMP_CLAUSE_PRIVATE_OUTER_REF (c) = 1;
	    }
	  else
	    notice_outer = false;
	  goto do_add;
	case OMP_CLAUSE_SHARED:
	  flags = GOVD_SHARED | GOVD_EXPLICIT;
	  goto do_add;
	case OMP_CLAUSE_FIRSTPRIVATE:
	  flags = GOVD_FIRSTPRIVATE | GOVD_EXPLICIT;
	  check_non_private = "firstprivate";
	  goto do_add;
	case OMP_CLAUSE_LASTPRIVATE:
	  flags = GOVD_LASTPRIVATE | GOVD_SEEN | GOVD_EXPLICIT;
	  check_non_private = "lastprivate";
	  goto do_add;
	case OMP_CLAUSE_REDUCTION:
	  flags = GOVD_REDUCTION | GOVD_SEEN | GOVD_EXPLICIT;
	  check_non_private = "reduction";
	  goto do_add;

	do_add:
	  decl = OMP_CLAUSE_DECL (c);
	  if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
	    {
	      remove = true;
	      break;
	    }
	  omp_add_variable (ctx, decl, flags);
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    {
	      /* Gimplify the reduction init/merge sequences inside the
		 new context, each in its own gimplify context.  */
	      omp_add_variable (ctx, OMP_CLAUSE_REDUCTION_PLACEHOLDER (c),
				GOVD_LOCAL | GOVD_SEEN);
	      gimplify_omp_ctxp = ctx;
	      push_gimplify_context ();
	      gimplify_stmt (&OMP_CLAUSE_REDUCTION_INIT (c));
	      pop_gimplify_context (OMP_CLAUSE_REDUCTION_INIT (c));
	      push_gimplify_context ();
	      gimplify_stmt (&OMP_CLAUSE_REDUCTION_MERGE (c));
	      pop_gimplify_context (OMP_CLAUSE_REDUCTION_MERGE (c));
	      gimplify_omp_ctxp = outer_ctx;
	    }
	  else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
		   && OMP_CLAUSE_LASTPRIVATE_STMT (c))
	    {
	      gimplify_omp_ctxp = ctx;
	      push_gimplify_context ();
	      /* Wrap a bare statement in a BIND_EXPR so temporaries
		 created during gimplification have a home.  */
	      if (TREE_CODE (OMP_CLAUSE_LASTPRIVATE_STMT (c)) != BIND_EXPR)
		{
		  tree bind = build3 (BIND_EXPR, void_type_node, NULL,
				      NULL, NULL);
		  TREE_SIDE_EFFECTS (bind) = 1;
		  BIND_EXPR_BODY (bind) = OMP_CLAUSE_LASTPRIVATE_STMT (c);
		  OMP_CLAUSE_LASTPRIVATE_STMT (c) = bind;
		}
	      gimplify_stmt (&OMP_CLAUSE_LASTPRIVATE_STMT (c));
	      pop_gimplify_context (OMP_CLAUSE_LASTPRIVATE_STMT (c));
	      gimplify_omp_ctxp = outer_ctx;
	    }
	  if (notice_outer)
	    goto do_notice;
	  break;

	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_COPYPRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
	    {
	      remove = true;
	      break;
	    }
	do_notice:
	  if (outer_ctx)
	    omp_notice_variable (outer_ctx, decl, true);
	  if (check_non_private
	      && region_type == ORT_WORKSHARE
	      && omp_check_private (ctx, decl))
	    {
	      error ("%s variable %qs is private in outer context",
		     check_non_private,
		     IDENTIFIER_POINTER (DECL_NAME (decl)));
	      remove = true;
	    }
	  break;

	case OMP_CLAUSE_IF:
	  OMP_CLAUSE_OPERAND (c, 0)
	    = gimple_boolify (OMP_CLAUSE_OPERAND (c, 0));
	  /* Fall through.  */

	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NUM_THREADS:
	  gs = gimplify_expr (&OMP_CLAUSE_OPERAND (c, 0), pre_p, NULL,
			      is_gimple_val, fb_rvalue);
	  if (gs == GS_ERROR)
	    remove = true;
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_COLLAPSE:
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	default:
	  gcc_unreachable ();
	}

      if (remove)
	*list_p = OMP_CLAUSE_CHAIN (c);
      else
	list_p = &OMP_CLAUSE_CHAIN (c);
    }

  gimplify_omp_ctxp = ctx;
}

/* For all variables that were not actually used within the context,
   remove PRIVATE, SHARED, and FIRSTPRIVATE clauses.
 */

static int
gimplify_adjust_omp_clauses_1 (splay_tree_node n, void *data)
{
  tree *list_p = (tree *) data;
  tree decl = (tree) n->key;
  unsigned flags = n->value;
  enum omp_clause_code code;
  tree clause;
  bool private_debug;

  /* Explicit clauses and context-local temporaries need no implicit
     clause; neither do variables never actually seen in code.  */
  if (flags & (GOVD_EXPLICIT | GOVD_LOCAL))
    return 0;
  if ((flags & GOVD_SEEN) == 0)
    return 0;
  if (flags & GOVD_DEBUG_PRIVATE)
    {
      gcc_assert ((flags & GOVD_DATA_SHARE_CLASS) == GOVD_PRIVATE);
      private_debug = true;
    }
  else
    private_debug
      = lang_hooks.decls.omp_private_debug_clause (decl,
						   !!(flags & GOVD_SHARED));
  if (private_debug)
    code = OMP_CLAUSE_PRIVATE;
  else if (flags & GOVD_SHARED)
    {
      if (is_global_var (decl))
	return 0;
      code = OMP_CLAUSE_SHARED;
    }
  else if (flags & GOVD_PRIVATE)
    code = OMP_CLAUSE_PRIVATE;
  else if (flags & GOVD_FIRSTPRIVATE)
    code = OMP_CLAUSE_FIRSTPRIVATE;
  else
    gcc_unreachable ();

  clause = build_omp_clause (code);
  OMP_CLAUSE_DECL (clause) = decl;
  OMP_CLAUSE_CHAIN (clause) = *list_p;
  if (private_debug)
    OMP_CLAUSE_PRIVATE_DEBUG (clause) = 1;
  else if (code == OMP_CLAUSE_PRIVATE && (flags & GOVD_PRIVATE_OUTER_REF))
    OMP_CLAUSE_PRIVATE_OUTER_REF (clause) = 1;
  *list_p = clause;
  lang_hooks.decls.omp_finish_clause (clause);

  return 0;
}

static void
gimplify_adjust_omp_clauses (tree *list_p)
{
  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
  tree c, decl;

  while ((c = *list_p) != NULL)
    {
      splay_tree_node n;
      bool remove = false;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_FIRSTPRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	  remove = !(n->value & GOVD_SEEN);
	  if (! remove)
	    {
	      bool shared = OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED;
	      if ((n->value & GOVD_DEBUG_PRIVATE)
		  || lang_hooks.decls.omp_private_debug_clause (decl, shared))
		{
		  gcc_assert ((n->value & GOVD_DEBUG_PRIVATE) == 0
			      || ((n->value & GOVD_DATA_SHARE_CLASS)
				  == GOVD_PRIVATE));
		  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_PRIVATE);
		  OMP_CLAUSE_PRIVATE_DEBUG (c) = 1;
		}
	    }
	  break;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Make sure OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE is set to
	     accurately reflect the presence of a FIRSTPRIVATE clause.  */
	  decl = OMP_CLAUSE_DECL (c);
	  n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	  OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)
	    = (n->value & GOVD_FIRSTPRIVATE) != 0;
	  break;

	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_COLLAPSE:
	  break;

	default:
	  gcc_unreachable ();
	}

      if (remove)
	*list_p = OMP_CLAUSE_CHAIN (c);
      else
	list_p = &OMP_CLAUSE_CHAIN (c);
    }

  /* Add in any implicit data sharing.  */
  splay_tree_foreach (ctx->variables, gimplify_adjust_omp_clauses_1, list_p);

  gimplify_omp_ctxp = ctx->outer_context;
  delete_omp_context (ctx);
}

/* Gimplify the contents of an OMP_PARALLEL statement.  This involves
   gimplification of the body, as well as scanning the body for used
   variables.  We need to do this scan now, because variable-sized
   decls will be decomposed during gimplification.  */

static enum gimplify_status
gimplify_omp_parallel (tree *expr_p, tree *pre_p)
{
  tree expr = *expr_p;

  gimplify_scan_omp_clauses (&OMP_PARALLEL_CLAUSES (expr), pre_p,
			     OMP_PARALLEL_COMBINED (expr)
			     ? ORT_COMBINED_PARALLEL
			     : ORT_PARALLEL);

  push_gimplify_context ();

  gimplify_stmt (&OMP_PARALLEL_BODY (expr));

  if (TREE_CODE (OMP_PARALLEL_BODY (expr)) == BIND_EXPR)
    pop_gimplify_context (OMP_PARALLEL_BODY (expr));
  else
    pop_gimplify_context (NULL_TREE);

  gimplify_adjust_omp_clauses (&OMP_PARALLEL_CLAUSES (expr));

  return GS_ALL_DONE;
}

/* Gimplify the contents of an OMP_TASK statement.  This involves
   gimplification of the body, as well as scanning the body for used
   variables.  We need to do this scan now, because variable-sized
   decls will be decomposed during gimplification.  */

static enum gimplify_status
gimplify_omp_task (tree *expr_p, tree *pre_p)
{
  tree expr = *expr_p;

  gimplify_scan_omp_clauses (&OMP_TASK_CLAUSES (expr), pre_p, ORT_TASK);

  push_gimplify_context ();

  gimplify_stmt (&OMP_TASK_BODY (expr));

  if (TREE_CODE (OMP_TASK_BODY (expr)) == BIND_EXPR)
    pop_gimplify_context (OMP_TASK_BODY (expr));
  else
    pop_gimplify_context (NULL_TREE);

  gimplify_adjust_omp_clauses (&OMP_TASK_CLAUSES (expr));

  return GS_ALL_DONE;
}

/* Gimplify the gross structure of an OMP_FOR statement.  */

static enum gimplify_status
gimplify_omp_for (tree *expr_p, tree *pre_p)
{
  tree for_stmt, decl, t;
  enum gimplify_status ret = GS_OK;
  int i;

  for_stmt = *expr_p;

  gimplify_scan_omp_clauses (&OMP_FOR_CLAUSES (for_stmt), pre_p,
			     ORT_WORKSHARE);

  /* If OMP_FOR is re-gimplified, ensure all variables in pre-body
     are noticed.  */
  //?
  gimplify_stmt (&OMP_FOR_PRE_BODY (for_stmt));

  /* INIT, COND and INCR vectors run in lockstep, one slot per
     collapsed loop level.  */
  gcc_assert (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt))
	      == TREE_VEC_LENGTH (OMP_FOR_COND (for_stmt)));
  gcc_assert (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt))
	      == TREE_VEC_LENGTH (OMP_FOR_INCR (for_stmt)));
  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
    {
      t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
      gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
      decl = TREE_OPERAND (t, 0);
      gcc_assert (DECL_P (decl));
      gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (decl))
		  || POINTER_TYPE_P (TREE_TYPE (decl)));

      /* Make sure the iteration variable is private.  */
      if (omp_is_private (gimplify_omp_ctxp, decl))
	omp_notice_variable (gimplify_omp_ctxp, decl, true);
      else
	omp_add_variable (gimplify_omp_ctxp, decl,
			  GOVD_PRIVATE | GOVD_SEEN);

      ret |= gimplify_expr (&TREE_OPERAND (t, 1),
			    &OMP_FOR_PRE_BODY (for_stmt),
			    NULL, is_gimple_val, fb_rvalue);

      t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), i);
      gcc_assert (COMPARISON_CLASS_P (t));
      gcc_assert (TREE_OPERAND (t, 0) == decl);

      ret |= gimplify_expr (&TREE_OPERAND (t, 1),
			    &OMP_FOR_PRE_BODY (for_stmt),
			    NULL, is_gimple_val, fb_rvalue);

      /* Canonicalize the increment into DECL = DECL + STEP form.  */
      t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
      switch (TREE_CODE (t))
	{
	case PREINCREMENT_EXPR:
	case POSTINCREMENT_EXPR:
	  t = build_int_cst (TREE_TYPE (decl), 1);
	  goto build_modify;
	case PREDECREMENT_EXPR:
	case POSTDECREMENT_EXPR:
	  t = build_int_cst (TREE_TYPE (decl), -1);
	  goto build_modify;
	build_modify:
	  t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
	  t = build2 (MODIFY_EXPR, void_type_node, decl, t);
	  TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i) = t;
	  break;

	case MODIFY_EXPR:
	  gcc_assert (TREE_OPERAND (t, 0) == decl);
	  t = TREE_OPERAND (t, 1);
	  switch (TREE_CODE (t))
	    {
	    case PLUS_EXPR:
	      /* Commute STEP + DECL into DECL + STEP.  */
	      if (TREE_OPERAND (t, 1) == decl)
		{
		  TREE_OPERAND (t, 1) = TREE_OPERAND (t, 0);
		  TREE_OPERAND (t, 0) = decl;
		  break;
		}
	      /* FALLTHRU */
	    case MINUS_EXPR:
	    case POINTER_PLUS_EXPR:
	      gcc_assert (TREE_OPERAND (t, 0) == decl);
	      break;
	    default:
	      gcc_unreachable ();
	    }

	  ret |= gimplify_expr (&TREE_OPERAND (t, 1),
				&OMP_FOR_PRE_BODY (for_stmt),
				NULL, is_gimple_val, fb_rvalue);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  gimplify_to_stmt_list (&OMP_FOR_BODY (for_stmt));
  gimplify_adjust_omp_clauses (&OMP_FOR_CLAUSES (for_stmt));

  return ret == GS_ALL_DONE ? GS_ALL_DONE : GS_ERROR;
}

/* Gimplify the gross structure of other OpenMP worksharing constructs.
   In particular, OMP_SECTIONS and OMP_SINGLE.  */

static enum gimplify_status
gimplify_omp_workshare (tree *expr_p, tree *pre_p)
{
  tree stmt = *expr_p;

  gimplify_scan_omp_clauses (&OMP_CLAUSES (stmt), pre_p, ORT_WORKSHARE);
  gimplify_to_stmt_list (&OMP_BODY (stmt));
  gimplify_adjust_omp_clauses (&OMP_CLAUSES (stmt));

  return GS_ALL_DONE;
}

/* A subroutine of gimplify_omp_atomic.  The front end is supposed to have
   stabilized the lhs of the atomic operation as *ADDR.  Return true if
   EXPR is this stabilized form.  */

static bool
goa_lhs_expr_p (tree expr, tree addr)
{
  /* Also include casts to other type variants.  The C front end is fond
     of adding these for e.g. volatile variables.  This is like
     STRIP_TYPE_NOPS but includes the main variant lookup.  */
  while ((TREE_CODE (expr) == NOP_EXPR
	  || TREE_CODE (expr) == CONVERT_EXPR
	  || TREE_CODE (expr) == NON_LVALUE_EXPR)
	 && TREE_OPERAND (expr, 0) != error_mark_node
	 && (TYPE_MAIN_VARIANT (TREE_TYPE (expr))
	     == TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (expr, 0)))))
    expr = TREE_OPERAND (expr, 0);

  if (TREE_CODE (expr) == INDIRECT_REF && TREE_OPERAND (expr, 0) == addr)
    return true;
  if (TREE_CODE (addr) == ADDR_EXPR && expr == TREE_OPERAND (addr, 0))
    return true;
  return false;
}

/* A subroutine of gimplify_omp_atomic.  Attempt to implement the atomic
   operation as a __sync_fetch_and_op builtin.  INDEX is log2 of the
   size of the data type, and thus usable to find the index of the builtin
   decl.  Returns GS_UNHANDLED if the expression is not of the proper form.
*/ static enum gimplify_status gimplify_omp_atomic_fetch_op (tree *expr_p, tree addr, tree rhs, int index) { enum built_in_function base; tree decl, args, itype; enum insn_code *optab; /* Check for one of the supported fetch-op operations. */ switch (TREE_CODE (rhs)) { case PLUS_EXPR: base = BUILT_IN_FETCH_AND_ADD_N; optab = sync_add_optab; break; case MINUS_EXPR: base = BUILT_IN_FETCH_AND_SUB_N; optab = sync_add_optab; break; case BIT_AND_EXPR: base = BUILT_IN_FETCH_AND_AND_N; optab = sync_and_optab; break; case BIT_IOR_EXPR: base = BUILT_IN_FETCH_AND_OR_N; optab = sync_ior_optab; break; case BIT_XOR_EXPR: base = BUILT_IN_FETCH_AND_XOR_N; optab = sync_xor_optab; break; default: return GS_UNHANDLED; } /* Make sure the expression is of the proper form. */ if (goa_lhs_expr_p (TREE_OPERAND (rhs, 0), addr)) rhs = TREE_OPERAND (rhs, 1); else if (commutative_tree_code (TREE_CODE (rhs)) && goa_lhs_expr_p (TREE_OPERAND (rhs, 1), addr)) rhs = TREE_OPERAND (rhs, 0); else return GS_UNHANDLED; decl = built_in_decls[base + index + 1]; itype = TREE_TYPE (TREE_TYPE (decl)); if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing) return GS_UNHANDLED; args = tree_cons (NULL, fold_convert (itype, rhs), NULL); args = tree_cons (NULL, addr, args); *expr_p = build_function_call_expr (decl, args); return GS_OK; } /* A subroutine of gimplify_omp_atomic_pipeline. Walk *EXPR_P and replace appearances of *LHS_ADDR with LHS_VAR. If an expression does not involve the lhs, evaluate it into a temporary. Return 1 if the lhs appeared as a subexpression, 0 if it did not, or -1 if an error was encountered. 
*/

static int
goa_stabilize_expr (tree *expr_p, tree *pre_p, tree lhs_addr, tree lhs_var)
{
  tree expr = *expr_p;
  int saw_lhs;

  if (goa_lhs_expr_p (expr, lhs_addr))
    {
      *expr_p = lhs_var;
      return 1;
    }
  if (is_gimple_val (expr))
    return 0;

  saw_lhs = 0;
  switch (TREE_CODE_CLASS (TREE_CODE (expr)))
    {
    case tcc_binary:
      saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 1), pre_p,
				     lhs_addr, lhs_var);
      /* FALLTHRU: a binary node also has operand 0, handled below.  */
    case tcc_unary:
      saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 0), pre_p,
				     lhs_addr, lhs_var);
      break;
    default:
      break;
    }

  /* If the lhs did not appear anywhere below, the subexpression is
     invariant w.r.t. the CAS loop and can be evaluated once up front.  */
  if (saw_lhs == 0)
    {
      enum gimplify_status gs;
      gs = gimplify_expr (expr_p, pre_p, NULL, is_gimple_val, fb_rvalue);
      if (gs != GS_ALL_DONE)
	saw_lhs = -1;
    }

  return saw_lhs;
}

/* A subroutine of gimplify_omp_atomic.  Implement the atomic operation as:

	oldval = *addr;
      repeat:
	newval = rhs;	// with oldval replacing *addr in rhs
	oldval = __sync_val_compare_and_swap (addr, oldval, newval);
	if (oldval != newval)
	  goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find the
   index of the builtin decl.  */

static enum gimplify_status
gimplify_omp_atomic_pipeline (tree *expr_p, tree *pre_p, tree addr,
			      tree rhs, int index)
{
  tree oldval, oldival, oldival2, newval, newival, label;
  tree type, itype, cmpxchg, args, x, iaddr;

  cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
  type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  itype = TREE_TYPE (TREE_TYPE (cmpxchg));

  /* Punt if the target has no compare-and-swap in this mode.  */
  if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
    return GS_UNHANDLED;

  oldval = create_tmp_var (type, NULL);
  newval = create_tmp_var (type, NULL);

  /* Precompute as much of RHS as possible.  In the same walk, replace
     occurrences of the lhs value with our temporary.  */
  if (goa_stabilize_expr (&rhs, pre_p, addr, oldval) < 0)
    return GS_ERROR;

  /* oldval = *addr;  */
  x = build_fold_indirect_ref (addr);
  x = build2 (MODIFY_EXPR, void_type_node, oldval, x);
  gimplify_and_add (x, pre_p);

  /* For floating-point values, we'll need to view-convert them to integers
     so that we can perform the atomic compare and swap.  Simplify the
     following code by always setting up the "i"ntegral variables.  */
  if (INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
    {
      oldival = oldval;
      newival = newval;
      iaddr = addr;
    }
  else
    {
      oldival = create_tmp_var (itype, NULL);
      newival = create_tmp_var (itype, NULL);

      x = build1 (VIEW_CONVERT_EXPR, itype, oldval);
      x = build2 (MODIFY_EXPR, void_type_node, oldival, x);
      gimplify_and_add (x, pre_p);
      iaddr = fold_convert (build_pointer_type (itype), addr);
    }

  oldival2 = create_tmp_var (itype, NULL);

  /* repeat:  */
  label = create_artificial_label ();
  x = build1 (LABEL_EXPR, void_type_node, label);
  gimplify_and_add (x, pre_p);

  /* newval = rhs;  (RHS already reads OLDVAL instead of *addr.)  */
  x = build2 (MODIFY_EXPR, void_type_node, newval, rhs);
  gimplify_and_add (x, pre_p);

  if (newval != newival)
    {
      x = build1 (VIEW_CONVERT_EXPR, itype, newval);
      x = build2 (MODIFY_EXPR, void_type_node, newival, x);
      gimplify_and_add (x, pre_p);
    }

  /* Remember the pre-CAS value so the loop condition can compare.  */
  x = build2 (MODIFY_EXPR, void_type_node, oldival2,
	      fold_convert (itype, oldival));
  gimplify_and_add (x, pre_p);

  /* oldval = __sync_val_compare_and_swap (addr, oldval, newval);  */
  args = tree_cons (NULL, fold_convert (itype, newival), NULL);
  args = tree_cons (NULL, fold_convert (itype, oldival), args);
  args = tree_cons (NULL, iaddr, args);
  x = build_function_call_expr (cmpxchg, args);
  if (oldval == oldival)
    x = fold_convert (type, x);
  x = build2 (MODIFY_EXPR, void_type_node, oldival, x);
  gimplify_and_add (x, pre_p);

  /* For floating point, be prepared for the loop backedge.  */
  if (oldval != oldival)
    {
      x = build1 (VIEW_CONVERT_EXPR, type, oldival);
      x = build2 (MODIFY_EXPR, void_type_node, oldval, x);
      gimplify_and_add (x, pre_p);
    }

  /* Note that we always perform the comparison as an integer, even for
     floating point.  This allows the atomic operation to properly
     succeed even with NaNs and -0.0.  */
  x = build3 (COND_EXPR, void_type_node,
	      build2 (NE_EXPR, boolean_type_node, oldival, oldival2),
	      build1 (GOTO_EXPR, void_type_node, label), NULL);
  gimplify_and_add (x, pre_p);

  *expr_p = NULL;
  return GS_ALL_DONE;
}

/* A subroutine of gimplify_omp_atomic.  Implement the atomic operation as:

	GOMP_atomic_start ();
	*addr = rhs;
	GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, appears to be within spec.
   Which makes sense, since that's how several other compilers handle
   this situation as well.  */

static enum gimplify_status
gimplify_omp_atomic_mutex (tree *expr_p, tree *pre_p, tree addr, tree rhs)
{
  tree t;

  t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
  t = build_function_call_expr (t, NULL);
  gimplify_and_add (t, pre_p);

  t = build_fold_indirect_ref (addr);
  t = build2 (MODIFY_EXPR, void_type_node, t, rhs);
  gimplify_and_add (t, pre_p);

  t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
  t = build_function_call_expr (t, NULL);
  gimplify_and_add (t, pre_p);

  *expr_p = NULL;
  return GS_ALL_DONE;
}

/* Gimplify an OMP_ATOMIC statement.  Tries, in decreasing order of
   preference: fetch-op builtin, compare-and-swap loop, global mutex.  */

static enum gimplify_status
gimplify_omp_atomic (tree *expr_p, tree *pre_p)
{
  tree addr = TREE_OPERAND (*expr_p, 0);
  tree rhs = TREE_OPERAND (*expr_p, 1);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  HOST_WIDE_INT index;

  /* Make sure the type is one of the supported sizes.  */
  index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
  index = exact_log2 (index);
  /* 1, 2, 4, 8 or 16 bytes only.  */
  if (index >= 0 && index <= 4)
    {
      enum gimplify_status gs;
      unsigned int align;

      if (DECL_P (TREE_OPERAND (addr, 0)))
	align = DECL_ALIGN_UNIT (TREE_OPERAND (addr, 0));
      else if (TREE_CODE (TREE_OPERAND (addr, 0)) == COMPONENT_REF
	       && TREE_CODE (TREE_OPERAND (TREE_OPERAND (addr, 0), 1))
		  == FIELD_DECL)
	align = DECL_ALIGN_UNIT (TREE_OPERAND (TREE_OPERAND (addr, 0), 1));
      else
	align = TYPE_ALIGN_UNIT (type);

      /* __sync builtins require strict data alignment.  */
      if (exact_log2 (align) >= index)
	{
	  /* When possible, use specialized atomic update functions.  */
	  if (INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
	    {
	      gs = gimplify_omp_atomic_fetch_op (expr_p, addr, rhs, index);
	      if (gs != GS_UNHANDLED)
		return gs;
	    }

	  /* If we don't have specialized __sync builtins, try and implement
	     as a compare and swap loop.  */
	  gs = gimplify_omp_atomic_pipeline (expr_p, pre_p, addr, rhs, index);
	  if (gs != GS_UNHANDLED)
	    return gs;
	}
    }

  /* The ultimate fallback is wrapping the operation in a mutex.  */
  return gimplify_omp_atomic_mutex (expr_p, pre_p, addr, rhs);
}

/* Gimplifies the expression tree pointed to by EXPR_P.  Return 0 if
   gimplification failed.

   PRE_P points to the list where side effects that must happen before
       EXPR should be stored.

   POST_P points to the list where side effects that must happen after
       EXPR should be stored, or NULL if there is no suitable list.  In
       that case, we copy the result to a temporary, emit the
       post-effects, and then return the temporary.

   GIMPLE_TEST_F points to a function that takes a tree T and
       returns nonzero if T is in the GIMPLE form requested by the
       caller.  The GIMPLE predicates are in tree-gimple.c.

       This test is used twice.  Before gimplification, the test is
       invoked to determine whether *EXPR_P is already gimple enough.  If
       that fails, *EXPR_P is gimplified according to its code and
       GIMPLE_TEST_F is called again.
   If the test still fails, then a new temporary variable is created and
   assigned the value of the gimplified expression.

   FALLBACK tells the function what sort of a temporary we want.  If the 1
       bit is set, an rvalue is OK.  If the 2 bit is set, an lvalue is OK.
       If both are set, either is OK, but an lvalue is preferable.

   The return value is either GS_ERROR or GS_ALL_DONE, since this function
   iterates until solution.  */

enum gimplify_status
gimplify_expr (tree *expr_p, tree *pre_p, tree *post_p,
	       bool (* gimple_test_f) (tree), fallback_t fallback)
{
  tree tmp;
  tree internal_pre = NULL_TREE;
  tree internal_post = NULL_TREE;
  tree save_expr;
  /* A NULL PRE_P means we are gimplifying at the statement level.  */
  int is_statement = (pre_p == NULL);
  location_t saved_location;
  enum gimplify_status ret;

  save_expr = *expr_p;
  if (save_expr == NULL_TREE)
    return GS_ALL_DONE;

  /* We used to check the predicate here and return immediately if it
     succeeds.  This is wrong; the design is for gimplification to be
     idempotent, and for the predicates to only test for valid forms, not
     whether they are fully simplified.  */

  /* Set up our internal queues if needed.  */
  if (pre_p == NULL)
    pre_p = &internal_pre;
  if (post_p == NULL)
    post_p = &internal_post;

  saved_location = input_location;
  if (save_expr != error_mark_node
      && EXPR_HAS_LOCATION (*expr_p))
    input_location = EXPR_LOCATION (*expr_p);

  /* Loop over the specific gimplifiers until the toplevel node
     remains the same.  */
  do
    {
      /* Strip away as many useless type conversions as possible
	 at the toplevel.  */
      STRIP_USELESS_TYPE_CONVERSION (*expr_p);

      /* Remember the expr.  */
      save_expr = *expr_p;

      /* Die, die, die, my darling.  */
      if (save_expr == error_mark_node
	  || (TREE_TYPE (save_expr)
	      && TREE_TYPE (save_expr) == error_mark_node))
	{
	  ret = GS_ERROR;
	  break;
	}

      /* Do any language-specific gimplification.  */
      ret = lang_hooks.gimplify_expr (expr_p, pre_p, post_p);
      if (ret == GS_OK)
	{
	  if (*expr_p == NULL_TREE)
	    break;
	  if (*expr_p != save_expr)
	    continue;
	}
      else if (ret != GS_UNHANDLED)
	break;

      ret = GS_OK;
      switch (TREE_CODE (*expr_p))
	{
	  /* First deal with the special cases.  */

	case POSTINCREMENT_EXPR:
	case POSTDECREMENT_EXPR:
	case PREINCREMENT_EXPR:
	case PREDECREMENT_EXPR:
	  ret = gimplify_self_mod_expr (expr_p, pre_p, post_p,
					fallback != fb_none);
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case COMPONENT_REF:
	case VIEW_CONVERT_EXPR:
	  ret = gimplify_compound_lval (expr_p, pre_p, post_p,
					fallback ? fallback : fb_rvalue);
	  break;

	case COND_EXPR:
	  ret = gimplify_cond_expr (expr_p, pre_p, fallback);
	  /* C99 code may assign to an array in a structure value of a
	     conditional expression, and this has undefined behavior
	     only on execution, so create a temporary if an lvalue is
	     required.  */
	  if (fallback == fb_lvalue)
	    {
	      *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p);
	      lang_hooks.mark_addressable (*expr_p);
	    }
	  break;

	case CALL_EXPR:
	  ret = gimplify_call_expr (expr_p, pre_p, fallback != fb_none);
	  /* C99 code may assign to an array in a structure returned
	     from a function, and this has undefined behavior only on
	     execution, so create a temporary if an lvalue is
	     required.  */
	  if (fallback == fb_lvalue)
	    {
	      *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p);
	      lang_hooks.mark_addressable (*expr_p);
	    }
	  break;

	case TREE_LIST:
	  gcc_unreachable ();

	case COMPOUND_EXPR:
	  ret = gimplify_compound_expr (expr_p, pre_p, fallback != fb_none);
	  break;

	case MODIFY_EXPR:
	case INIT_EXPR:
	  ret = gimplify_modify_expr (expr_p, pre_p, post_p,
				      fallback != fb_none);
	  /* The distinction between MODIFY_EXPR and INIT_EXPR is no longer
	     useful.  */
	  if (*expr_p && TREE_CODE (*expr_p) == INIT_EXPR)
	    TREE_SET_CODE (*expr_p, MODIFY_EXPR);
	  break;

	case TRUTH_ANDIF_EXPR:
	case TRUTH_ORIF_EXPR:
	  ret = gimplify_boolean_expr (expr_p);
	  break;

	case TRUTH_NOT_EXPR:
	  TREE_OPERAND (*expr_p, 0)
	    = gimple_boolify (TREE_OPERAND (*expr_p, 0));
	  ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
			       is_gimple_val, fb_rvalue);
	  recalculate_side_effects (*expr_p);
	  break;

	case ADDR_EXPR:
	  ret = gimplify_addr_expr (expr_p, pre_p, post_p);
	  break;

	case VA_ARG_EXPR:
	  ret = gimplify_va_arg_expr (expr_p, pre_p, post_p);
	  break;

	case CONVERT_EXPR:
	case NOP_EXPR:
	  if (IS_EMPTY_STMT (*expr_p))
	    {
	      ret = GS_ALL_DONE;
	      break;
	    }

	  if (VOID_TYPE_P (TREE_TYPE (*expr_p))
	      || fallback == fb_none)
	    {
	      /* Just strip a conversion to void (or in void context) and
		 try again.  */
	      *expr_p = TREE_OPERAND (*expr_p, 0);
	      break;
	    }

	  ret = gimplify_conversion (expr_p);
	  if (ret == GS_ERROR)
	    break;
	  if (*expr_p != save_expr)
	    break;
	  /* FALLTHRU */

	case FIX_TRUNC_EXPR:
	case FIX_CEIL_EXPR:
	case FIX_FLOOR_EXPR:
	case FIX_ROUND_EXPR:
	  /* unary_expr: ... | '(' cast ')' val | ...  */
	  ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
			       is_gimple_val, fb_rvalue);
	  recalculate_side_effects (*expr_p);
	  break;

	case INDIRECT_REF:
	  *expr_p = fold_indirect_ref (*expr_p);
	  if (*expr_p != save_expr)
	    break;
	  /* else fall through.  */
	case ALIGN_INDIRECT_REF:
	case MISALIGNED_INDIRECT_REF:
	  ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
			       is_gimple_reg, fb_rvalue);
	  recalculate_side_effects (*expr_p);
	  break;

	  /* Constants need not be gimplified.  */
	case INTEGER_CST:
	case REAL_CST:
	case STRING_CST:
	case COMPLEX_CST:
	case VECTOR_CST:
	  ret = GS_ALL_DONE;
	  break;

	case CONST_DECL:
	  /* If we require an lvalue, such as for ADDR_EXPR, retain the
	     CONST_DECL node.  Otherwise the decl is replaceable by its
	     value.  */
	  /* ??? Should be == fb_lvalue, but ADDR_EXPR passes fb_either.  */
	  if (fallback & fb_lvalue)
	    ret = GS_ALL_DONE;
	  else
	    *expr_p = DECL_INITIAL (*expr_p);
	  break;

	case DECL_EXPR:
	  ret = gimplify_decl_expr (expr_p);
	  break;

	case EXC_PTR_EXPR:
	  /* FIXME make this a decl.  */
	  ret = GS_ALL_DONE;
	  break;

	case BIND_EXPR:
	  ret = gimplify_bind_expr (expr_p, pre_p);
	  break;

	case LOOP_EXPR:
	  ret = gimplify_loop_expr (expr_p, pre_p);
	  break;

	case SWITCH_EXPR:
	  ret = gimplify_switch_expr (expr_p, pre_p);
	  break;

	case EXIT_EXPR:
	  ret = gimplify_exit_expr (expr_p);
	  break;

	case GOTO_EXPR:
	  /* If the target is not LABEL, then it is a computed jump
	     and the target needs to be gimplified.  */
	  if (TREE_CODE (GOTO_DESTINATION (*expr_p)) != LABEL_DECL)
	    ret = gimplify_expr (&GOTO_DESTINATION (*expr_p), pre_p,
				 NULL, is_gimple_val, fb_rvalue);
	  break;

	case LABEL_EXPR:
	  ret = GS_ALL_DONE;
	  gcc_assert (decl_function_context (LABEL_EXPR_LABEL (*expr_p))
		      == current_function_decl);
	  break;

	case CASE_LABEL_EXPR:
	  ret = gimplify_case_label_expr (expr_p);
	  break;

	case RETURN_EXPR:
	  ret = gimplify_return_expr (*expr_p, pre_p);
	  break;

	case CONSTRUCTOR:
	  /* Don't reduce this in place; let gimplify_init_constructor work
	     its magic.  But if we're just elaborating this for side
	     effects, just gimplify any element that has side-effects.  */
	  if (fallback == fb_none)
	    {
	      unsigned HOST_WIDE_INT ix;
	      constructor_elt *ce;
	      tree temp = NULL_TREE;
	      for (ix = 0;
		   VEC_iterate (constructor_elt, CONSTRUCTOR_ELTS (*expr_p),
				ix, ce);
		   ix++)
		if (TREE_SIDE_EFFECTS (ce->value))
		  append_to_statement_list (ce->value, &temp);

	      *expr_p = temp;
	      ret = GS_OK;
	    }
	  /* C99 code may assign to an array in a constructed
	     structure or union, and this has undefined behavior only
	     on execution, so create a temporary if an lvalue is
	     required.  */
	  else if (fallback == fb_lvalue)
	    {
	      *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p);
	      lang_hooks.mark_addressable (*expr_p);
	    }
	  else
	    ret = GS_ALL_DONE;
	  break;

	  /* The following are special cases that are not handled by the
	     original GIMPLE grammar.  */

	  /* SAVE_EXPR nodes are converted into a GIMPLE identifier and
	     eliminated.  */
	case SAVE_EXPR:
	  ret = gimplify_save_expr (expr_p, pre_p, post_p);
	  break;

	case BIT_FIELD_REF:
	  {
	    enum gimplify_status r0, r1, r2;

	    r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
				post_p, is_gimple_lvalue, fb_either);
	    r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
				post_p, is_gimple_val, fb_rvalue);
	    r2 = gimplify_expr (&TREE_OPERAND (*expr_p, 2), pre_p,
				post_p, is_gimple_val, fb_rvalue);
	    recalculate_side_effects (*expr_p);

	    ret = MIN (r0, MIN (r1, r2));
	  }
	  break;

	case NON_LVALUE_EXPR:
	  /* This should have been stripped above.  */
	  gcc_unreachable ();

	case ASM_EXPR:
	  ret = gimplify_asm_expr (expr_p, pre_p, post_p);
	  break;

	case TRY_FINALLY_EXPR:
	case TRY_CATCH_EXPR:
	  gimplify_to_stmt_list (&TREE_OPERAND (*expr_p, 0));
	  gimplify_to_stmt_list (&TREE_OPERAND (*expr_p, 1));
	  ret = GS_ALL_DONE;
	  break;

	case CLEANUP_POINT_EXPR:
	  ret = gimplify_cleanup_point_expr (expr_p, pre_p);
	  break;

	case TARGET_EXPR:
	  ret = gimplify_target_expr (expr_p, pre_p, post_p);
	  break;

	case CATCH_EXPR:
	  gimplify_to_stmt_list (&CATCH_BODY (*expr_p));
	  ret = GS_ALL_DONE;
	  break;

	case EH_FILTER_EXPR:
	  gimplify_to_stmt_list (&EH_FILTER_FAILURE (*expr_p));
	  ret = GS_ALL_DONE;
	  break;

	case OBJ_TYPE_REF:
	  {
	    enum gimplify_status r0, r1;
	    r0 = gimplify_expr (&OBJ_TYPE_REF_OBJECT (*expr_p), pre_p,
				post_p, is_gimple_val, fb_rvalue);
	    r1 = gimplify_expr (&OBJ_TYPE_REF_EXPR (*expr_p), pre_p,
				post_p, is_gimple_val, fb_rvalue);
	    ret = MIN (r0, r1);
	  }
	  break;

	case LABEL_DECL:
	  /* We get here when taking the address of a label.  We mark
	     the label as "forced"; meaning it can never be removed and
	     it is a potential target for any computed goto.  */
	  FORCED_LABEL (*expr_p) = 1;
	  ret = GS_ALL_DONE;
	  break;

	case STATEMENT_LIST:
	  ret = gimplify_statement_list (expr_p, pre_p);
	  break;

	case WITH_SIZE_EXPR:
	  {
	    gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
			   post_p == &internal_post ? NULL : post_p,
			   gimple_test_f, fallback);
	    gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p,
			   is_gimple_val, fb_rvalue);
	  }
	  break;

	case VAR_DECL:
	case PARM_DECL:
	  ret = gimplify_var_or_parm_decl (expr_p);
	  break;

	case RESULT_DECL:
	  /* When within an OpenMP context, notice uses of variables.  */
	  if (gimplify_omp_ctxp)
	    omp_notice_variable (gimplify_omp_ctxp, *expr_p, true);
	  ret = GS_ALL_DONE;
	  break;

	case SSA_NAME:
	  /* Allow callbacks into the gimplifier during optimization.  */
	  ret = GS_ALL_DONE;
	  break;

	case OMP_PARALLEL:
	  ret = gimplify_omp_parallel (expr_p, pre_p);
	  break;

	case OMP_TASK:
	  ret = gimplify_omp_task (expr_p, pre_p);
	  break;

	case OMP_FOR:
	  ret = gimplify_omp_for (expr_p, pre_p);
	  break;

	case OMP_SECTIONS:
	case OMP_SINGLE:
	  ret = gimplify_omp_workshare (expr_p, pre_p);
	  break;

	case OMP_SECTION:
	case OMP_MASTER:
	case OMP_ORDERED:
	case OMP_CRITICAL:
	  /* RET stays GS_OK here; the post-switch check below converts it
	     to GS_ALL_DONE since *EXPR_P is unchanged.  */
	  gimplify_to_stmt_list (&OMP_BODY (*expr_p));
	  break;

	case OMP_ATOMIC:
	  ret = gimplify_omp_atomic (expr_p, pre_p);
	  break;

	case OMP_RETURN:
	case OMP_CONTINUE:
	  ret = GS_ALL_DONE;
	  break;

	default:
	  switch (TREE_CODE_CLASS (TREE_CODE (*expr_p)))
	    {
	    case tcc_comparison:
	      /* Handle comparison of objects of non scalar mode aggregates
		 with a call to memcmp.  It would be nice to only have to do
		 this for variable-sized objects, but then we'd have to allow
		 the same nest of reference nodes we allow for MODIFY_EXPR and
		 that's too complex.

		 Compare scalar mode aggregates as scalar mode values.  Using
		 memcmp for them would be very inefficient at best, and is
		 plain wrong if bitfields are involved.  */

	      {
		tree type = TREE_TYPE (TREE_OPERAND (*expr_p, 1));

		if (!AGGREGATE_TYPE_P (type))
		  goto expr_2;
		else if (TYPE_MODE (type) != BLKmode)
		  ret = gimplify_scalar_mode_aggregate_compare (expr_p);
		else
		  ret = gimplify_variable_sized_compare (expr_p);

		break;
	      }

	    /* If *EXPR_P does not need to be special-cased, handle it
	       according to its class.  */
	    case tcc_unary:
	      ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
				   post_p, is_gimple_val, fb_rvalue);
	      break;

	    case tcc_binary:
	    expr_2:
	      {
		enum gimplify_status r0, r1;

		r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
				    post_p, is_gimple_val, fb_rvalue);
		r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
				    post_p, is_gimple_val, fb_rvalue);

		ret = MIN (r0, r1);
		break;
	      }

	    case tcc_declaration:
	    case tcc_constant:
	      ret = GS_ALL_DONE;
	      goto dont_recalculate;

	    default:
	      gcc_assert (TREE_CODE (*expr_p) == TRUTH_AND_EXPR
			  || TREE_CODE (*expr_p) == TRUTH_OR_EXPR
			  || TREE_CODE (*expr_p) == TRUTH_XOR_EXPR);
	      goto expr_2;
	    }

	  recalculate_side_effects (*expr_p);
	dont_recalculate:
	  break;
	}

      /* If we replaced *expr_p, gimplify again.  */
      if (ret == GS_OK && (*expr_p == NULL || *expr_p == save_expr))
	ret = GS_ALL_DONE;
    }
  while (ret == GS_OK);

  /* If we encountered an error_mark somewhere nested inside, either
     stub out the statement or propagate the error back out.  */
  if (ret == GS_ERROR)
    {
      if (is_statement)
	*expr_p = NULL;
      goto out;
    }

  /* This was only valid as a return value from the langhook, which
     we handled.  Make sure it doesn't escape from any other context.  */
  gcc_assert (ret != GS_UNHANDLED);

  if (fallback == fb_none && *expr_p && !is_gimple_stmt (*expr_p))
    {
      /* We aren't looking for a value, and we don't have a valid
	 statement.  If it doesn't have side-effects, throw it away.  */
      if (!TREE_SIDE_EFFECTS (*expr_p))
	*expr_p = NULL;
      else if (!TREE_THIS_VOLATILE (*expr_p))
	{
	  /* This is probably a _REF that contains something nested that
	     has side effects.  Recurse through the operands to find it.  */
	  enum tree_code code = TREE_CODE (*expr_p);

	  switch (code)
	    {
	    case COMPONENT_REF:
	    case REALPART_EXPR:
	    case IMAGPART_EXPR:
	    case VIEW_CONVERT_EXPR:
	      gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
			     gimple_test_f, fallback);
	      break;

	    case ARRAY_REF:
	    case ARRAY_RANGE_REF:
	      gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
			     gimple_test_f, fallback);
	      gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p,
			     gimple_test_f, fallback);
	      break;

	    default:
	      /* Anything else with side-effects must be converted to
		 a valid statement before we get here.  */
	      gcc_unreachable ();
	    }

	  *expr_p = NULL;
	}
      else if (COMPLETE_TYPE_P (TREE_TYPE (*expr_p))
	       && TYPE_MODE (TREE_TYPE (*expr_p)) != BLKmode)
	{
	  /* Historically, the compiler has treated a bare reference
	     to a non-BLKmode volatile lvalue as forcing a load.  */
	  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (*expr_p));
	  /* Normally, we do not want to create a temporary for a
	     TREE_ADDRESSABLE type because such a type should not be
	     copied by bitwise-assignment.  However, we make an
	     exception here, as all we are doing here is ensuring that
	     we read the bytes that make up the type.  We use
	     create_tmp_var_raw because create_tmp_var will abort when
	     given a TREE_ADDRESSABLE type.  */
	  tree tmp = create_tmp_var_raw (type, "vol");
	  gimple_add_tmp_var (tmp);
	  *expr_p = build2 (MODIFY_EXPR, type, tmp, *expr_p);
	}
      else
	/* We can't do anything useful with a volatile reference to
	   an incomplete type, so just throw it away.  Likewise for
	   a BLKmode type, since any implicit inner load should
	   already have been turned into an explicit one by the
	   gimplification process.  */
	*expr_p = NULL;
    }

  /* If we are gimplifying at the statement level, we're done.  Tack
     everything together and replace the original statement with the
     gimplified form.  */
  if (fallback == fb_none || is_statement)
    {
      if (internal_pre || internal_post)
	{
	  append_to_statement_list (*expr_p, &internal_pre);
	  append_to_statement_list (internal_post, &internal_pre);
	  annotate_all_with_locus (&internal_pre, input_location);
	  *expr_p = internal_pre;
	}
      else if (!*expr_p)
	;
      else if (TREE_CODE (*expr_p) == STATEMENT_LIST)
	annotate_all_with_locus (expr_p, input_location);
      else
	annotate_one_with_locus (*expr_p, input_location);
      goto out;
    }

  /* Otherwise we're gimplifying a subexpression, so the resulting
     value is interesting.  */

  /* If it's sufficiently simple already, we're done.  Unless we are
     handling some post-effects internally; if that's the case, we need to
     copy into a temp before adding the post-effects to the tree.  */
  if (!internal_post && (*gimple_test_f) (*expr_p))
    goto out;

  /* Otherwise, we need to create a new temporary for the gimplified
     expression.  */

  /* We can't return an lvalue if we have an internal postqueue.  The
     object the lvalue refers to would (probably) be modified by the
     postqueue; we need to copy the value out first, which means an
     rvalue.  */
  if ((fallback & fb_lvalue) && !internal_post
      && is_gimple_addressable (*expr_p))
    {
      /* An lvalue will do.  Take the address of the expression, store it
	 in a temporary, and replace the expression with an INDIRECT_REF of
	 that temporary.  */
      tmp = build_fold_addr_expr (*expr_p);
      gimplify_expr (&tmp, pre_p, post_p, is_gimple_reg, fb_rvalue);
      *expr_p = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (tmp)), tmp);
    }
  else if ((fallback & fb_rvalue) && is_gimple_formal_tmp_rhs (*expr_p))
    {
      gcc_assert (!VOID_TYPE_P (TREE_TYPE (*expr_p)));

      /* An rvalue will do.  Assign the gimplified expression into a new
	 temporary TMP and replace the original expression with TMP.  */

      if (internal_post || (fallback & fb_lvalue))
	/* The postqueue might change the value of the expression between
	   the initialization and use of the temporary, so we can't use a
	   formal temp.  FIXME do we care?  */
	*expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p);
      else
	*expr_p = get_formal_tmp_var (*expr_p, pre_p);

      if (TREE_CODE (*expr_p) != SSA_NAME)
	DECL_GIMPLE_FORMAL_TEMP_P (*expr_p) = 1;
    }
  else
    {
#ifdef ENABLE_CHECKING
      if (!(fallback & fb_mayfail))
	{
	  fprintf (stderr, "gimplification failed:\n");
	  print_generic_expr (stderr, *expr_p, 0);
	  debug_tree (*expr_p);
	  internal_error ("gimplification failed");
	}
#endif
      gcc_assert (fallback & fb_mayfail);
      /* If this is an asm statement, and the user asked for the
	 impossible, don't die.  Fail and let gimplify_asm_expr issue an
	 error.  */
      ret = GS_ERROR;
      goto out;
    }

  /* Make sure the temporary matches our predicate.  */
  gcc_assert ((*gimple_test_f) (*expr_p));

  if (internal_post)
    {
      annotate_all_with_locus (&internal_post, input_location);
      append_to_statement_list (internal_post, pre_p);
    }

 out:
  input_location = saved_location;
  return ret;
}

/* Look through TYPE for variable-sized objects and gimplify each such
   size that we find.  Add to LIST_P any statements generated.  */

void
gimplify_type_sizes (tree type, tree *list_p)
{
  tree field, t;

  if (type == NULL || type == error_mark_node)
    return;

  /* We first do the main variant, then copy into any other variants.  */
  type = TYPE_MAIN_VARIANT (type);

  /* Avoid infinite recursion.  */
  if (TYPE_SIZES_GIMPLIFIED (type))
    return;

  TYPE_SIZES_GIMPLIFIED (type) = 1;

  switch (TREE_CODE (type))
    {
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case REAL_TYPE:
      gimplify_one_sizepos (&TYPE_MIN_VALUE (type), list_p);
      gimplify_one_sizepos (&TYPE_MAX_VALUE (type), list_p);

      for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
	{
	  TYPE_MIN_VALUE (t) = TYPE_MIN_VALUE (type);
	  TYPE_MAX_VALUE (t) = TYPE_MAX_VALUE (type);
	}
      break;

    case ARRAY_TYPE:
      /* These types may not have declarations, so handle them here.  */
      gimplify_type_sizes (TREE_TYPE (type), list_p);
      gimplify_type_sizes (TYPE_DOMAIN (type), list_p);
      break;

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
	if (TREE_CODE (field) == FIELD_DECL)
	  {
	    gimplify_one_sizepos (&DECL_FIELD_OFFSET (field), list_p);
	    gimplify_type_sizes (TREE_TYPE (field), list_p);
	  }
      break;

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      /* We used to recurse on the pointed-to type here, which turned out
	 to be incorrect because its definition might refer to variables
	 not yet initialized at this point if a forward declaration is
	 involved.

	 It was actually useful for anonymous pointed-to types to ensure
	 that the sizes evaluation dominates every possible later use of
	 the values.  Restricting to such types here would be safe since
	 there is no possible forward declaration around, but would
	 introduce an undesirable middle-end semantic to anonymity.  We
	 then defer to front-ends the responsibility of ensuring that the
	 sizes are evaluated both early and late enough, e.g. by attaching
	 artificial type declarations to the tree.  */
      break;

    default:
      break;
    }

  gimplify_one_sizepos (&TYPE_SIZE (type), list_p);
  gimplify_one_sizepos (&TYPE_SIZE_UNIT (type), list_p);

  for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
    {
      TYPE_SIZE (t) = TYPE_SIZE (type);
      TYPE_SIZE_UNIT (t) = TYPE_SIZE_UNIT (type);
      TYPE_SIZES_GIMPLIFIED (t) = 1;
    }
}

/* A subroutine of gimplify_type_sizes to make sure that *EXPR_P,
   a size or position, has had all of its SAVE_EXPRs evaluated.
   We add any required statements to STMT_P.  */

void
gimplify_one_sizepos (tree *expr_p, tree *stmt_p)
{
  tree type, expr = *expr_p;

  /* We don't do anything if the value isn't there, is constant, or contains
     A PLACEHOLDER_EXPR.  We also don't want to do anything if it's already
     a VAR_DECL.  If it's a VAR_DECL from another function, the gimplifier
     will want to replace it with a new variable, but that will cause
     problems if this type is from outside the function.  It's OK to have
     that here.  */
  if (expr == NULL_TREE || TREE_CONSTANT (expr)
      || TREE_CODE (expr) == VAR_DECL
      || CONTAINS_PLACEHOLDER_P (expr))
    return;

  type = TREE_TYPE (expr);
  *expr_p = unshare_expr (expr);

  gimplify_expr (expr_p, stmt_p, NULL, is_gimple_val, fb_rvalue);
  expr = *expr_p;

  /* Verify that we've an exact type match with the original expression.
     In particular, we do not wish to drop a "sizetype" in favour of a
     type of similar dimensions.  We don't want to pollute the generic
     type-stripping code with this knowledge because it doesn't matter
     for the bulk of GENERIC/GIMPLE.  It only matters that TYPE_SIZE_UNIT
     and friends retain their "sizetype-ness".  */
  if (TREE_TYPE (expr) != type
      && TREE_CODE (type) == INTEGER_TYPE
      && TYPE_IS_SIZETYPE (type))
    {
      tree tmp;

      /* Force a NOP_EXPR conversion back into the sizetype via a
	 temporary assignment.  */
      *expr_p = create_tmp_var (type, NULL);
      tmp = build1 (NOP_EXPR, type, expr);
      tmp = build2 (MODIFY_EXPR, type, *expr_p, tmp);
      if (EXPR_HAS_LOCATION (expr))
	SET_EXPR_LOCUS (tmp, EXPR_LOCUS (expr));
      else
	SET_EXPR_LOCATION (tmp, input_location);

      gimplify_and_add (tmp, stmt_p);
    }
}

#ifdef ENABLE_CHECKING
/* Compare types A and B for a "close enough" match.  */

static bool
cpt_same_type (tree a, tree b)
{
  if (lang_hooks.types_compatible_p (a, b))
    return true;

  /* ??? The C++ FE decomposes METHOD_TYPES to FUNCTION_TYPES and doesn't
     link them together.  This routine is intended to catch type errors
     that will affect the optimizers, and the optimizers don't add new
     dereferences of function pointers, so ignore it.  */
  if ((TREE_CODE (a) == FUNCTION_TYPE || TREE_CODE (a) == METHOD_TYPE)
      && (TREE_CODE (b) == FUNCTION_TYPE || TREE_CODE (b) == METHOD_TYPE))
    return true;

  /* ??? The C FE pushes type qualifiers after the fact into the type of
     the element from the type of the array.  See build_unary_op's handling
     of ADDR_EXPR.  This seems wrong -- if we were going to do this, we
     should have done it when creating the variable in the first place.
     Alternately, why aren't the two array types made variants?  */
  if (TREE_CODE (a) == ARRAY_TYPE && TREE_CODE (b) == ARRAY_TYPE)
    return cpt_same_type (TREE_TYPE (a), TREE_TYPE (b));

  /* And because of those, we have to recurse down through pointers.  */
  if (POINTER_TYPE_P (a) && POINTER_TYPE_P (b))
    return cpt_same_type (TREE_TYPE (a), TREE_TYPE (b));

  return false;
}

/* Check for some cases of the front end missing cast expressions.
   The type of a dereference should correspond to the pointer type;
   similarly the type of an address should match its object.  */

static tree
check_pointer_types_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
		       void *data ATTRIBUTE_UNUSED)
{
  tree t = *tp;
  tree ptype, otype, dtype;

  switch (TREE_CODE (t))
    {
    case INDIRECT_REF:
    case ARRAY_REF:
      otype = TREE_TYPE (t);
      ptype = TREE_TYPE (TREE_OPERAND (t, 0));
      dtype = TREE_TYPE (ptype);
      gcc_assert (cpt_same_type (otype, dtype));
      break;

    case ADDR_EXPR:
      ptype = TREE_TYPE (t);
      otype = TREE_TYPE (TREE_OPERAND (t, 0));
      dtype = TREE_TYPE (ptype);
      if (!cpt_same_type (otype, dtype))
	{
	  /* &array is allowed to produce a pointer to the element, rather
	     than a pointer to the array type.  We must allow this in order
	     to properly represent assigning the address of an array in C
	     into pointer to the element type.  */
	  gcc_assert (TREE_CODE (otype) == ARRAY_TYPE
		      && POINTER_TYPE_P (ptype)
		      && cpt_same_type (TREE_TYPE (otype), dtype));
	  break;
	}
      break;

    default:
      return NULL_TREE;
    }

  return NULL_TREE;
}
#endif

/* Gimplify the body of statements pointed to by BODY_P.  FNDECL is the
   function decl containing BODY.  */

void
gimplify_body (tree *body_p, tree fndecl, bool do_parms)
{
  location_t saved_location = input_location;
  tree body, parm_stmts;

  timevar_push (TV_TREE_GIMPLIFY);

  gcc_assert (gimplify_ctxp == NULL);
  push_gimplify_context ();

  /* Unshare most shared trees in the body and in that of any nested
     functions.  It would seem we don't have to do this for nested
     functions because they are supposed to be output and then the outer
     function gimplified first, but the g++ front end doesn't always do
     it that way.  */
  unshare_body (body_p, fndecl);
  unvisit_body (body_p, fndecl);

  /* Make sure input_location isn't set to something weird.  */
  input_location = DECL_SOURCE_LOCATION (fndecl);

  /* Resolve callee-copies.  This has to be done before processing
     the body so that DECL_VALUE_EXPR gets processed correctly.  */
  parm_stmts = do_parms ? gimplify_parameters () : NULL;

  /* Gimplify the function's body.  */
  gimplify_stmt (body_p);
  body = *body_p;

  if (!body)
    body = alloc_stmt_list ();
  else if (TREE_CODE (body) == STATEMENT_LIST)
    {
      /* Collapse a singleton statement list to its sole element.  */
      tree t = expr_only (*body_p);
      if (t)
	body = t;
    }

  /* If there isn't an outer BIND_EXPR, add one.  */
  if (TREE_CODE (body) != BIND_EXPR)
    {
      tree b = build3 (BIND_EXPR, void_type_node, NULL_TREE,
		       NULL_TREE, NULL_TREE);
      TREE_SIDE_EFFECTS (b) = 1;
      append_to_statement_list_force (body, &BIND_EXPR_BODY (b));
      body = b;
    }

  /* If we had callee-copies statements, insert them at the beginning
     of the function.  */
  if (parm_stmts)
    {
      append_to_statement_list_force (BIND_EXPR_BODY (body), &parm_stmts);
      BIND_EXPR_BODY (body) = parm_stmts;
    }

  /* Unshare again, in case gimplification was sloppy.  */
  unshare_all_trees (body);

  *body_p = body;

  pop_gimplify_context (body);
  gcc_assert (gimplify_ctxp == NULL);

#ifdef ENABLE_CHECKING
  walk_tree (body_p, check_pointer_types_r, NULL, NULL);
#endif

  timevar_pop (TV_TREE_GIMPLIFY);
  input_location = saved_location;
}

/* Entry point to the gimplification pass.  FNDECL is the FUNCTION_DECL
   node for the function we want to gimplify.
*/ void gimplify_function_tree (tree fndecl) { tree oldfn, parm, ret; oldfn = current_function_decl; current_function_decl = fndecl; cfun = DECL_STRUCT_FUNCTION (fndecl); if (cfun == NULL) allocate_struct_function (fndecl); for (parm = DECL_ARGUMENTS (fndecl); parm ; parm = TREE_CHAIN (parm)) { /* Preliminarily mark non-addressed complex variables as eligible for promotion to gimple registers. We'll transform their uses as we find them. */ if (TREE_CODE (TREE_TYPE (parm)) == COMPLEX_TYPE && !TREE_THIS_VOLATILE (parm) && !needs_to_live_in_memory (parm)) DECL_COMPLEX_GIMPLE_REG_P (parm) = 1; } ret = DECL_RESULT (fndecl); if (TREE_CODE (TREE_TYPE (ret)) == COMPLEX_TYPE && !needs_to_live_in_memory (ret)) DECL_COMPLEX_GIMPLE_REG_P (ret) = 1; gimplify_body (&DECL_SAVED_TREE (fndecl), fndecl, true); /* If we're instrumenting function entry/exit, then prepend the call to the entry hook and wrap the whole function in a TRY_FINALLY_EXPR to catch the exit hook. */ /* ??? Add some way to ignore exceptions for this TFE. */ if (flag_instrument_function_entry_exit && ! DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (fndecl)) { tree tf, x, bind; tf = build2 (TRY_FINALLY_EXPR, void_type_node, NULL, NULL); TREE_SIDE_EFFECTS (tf) = 1; x = DECL_SAVED_TREE (fndecl); append_to_statement_list (x, &TREE_OPERAND (tf, 0)); x = implicit_built_in_decls[BUILT_IN_PROFILE_FUNC_EXIT]; x = build_function_call_expr (x, NULL); append_to_statement_list (x, &TREE_OPERAND (tf, 1)); bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL); TREE_SIDE_EFFECTS (bind) = 1; x = implicit_built_in_decls[BUILT_IN_PROFILE_FUNC_ENTER]; x = build_function_call_expr (x, NULL); append_to_statement_list (x, &BIND_EXPR_BODY (bind)); append_to_statement_list (tf, &BIND_EXPR_BODY (bind)); DECL_SAVED_TREE (fndecl) = bind; } current_function_decl = oldfn; cfun = oldfn ? DECL_STRUCT_FUNCTION (oldfn) : NULL; } /* Expands EXPR to list of gimple statements STMTS. 
If SIMPLE is true, force the result to be either ssa_name or an invariant, otherwise just force it to be a rhs expression. If VAR is not NULL, make the base variable of the final destination be VAR if suitable. */ tree force_gimple_operand (tree expr, tree *stmts, bool simple, tree var) { tree t; enum gimplify_status ret; gimple_predicate gimple_test_f; *stmts = NULL_TREE; if (is_gimple_val (expr)) return expr; gimple_test_f = simple ? is_gimple_val : is_gimple_reg_rhs; push_gimplify_context (); gimplify_ctxp->into_ssa = in_ssa_p; if (var) expr = build2 (MODIFY_EXPR, TREE_TYPE (var), var, expr); if (TREE_CODE (expr) != MODIFY_EXPR && TREE_TYPE (expr) == void_type_node) { gimplify_and_add (expr, stmts); expr = NULL_TREE; } else { ret = gimplify_expr (&expr, stmts, NULL, gimple_test_f, fb_rvalue); gcc_assert (ret != GS_ERROR); } if (referenced_vars) { for (t = gimplify_ctxp->temps; t ; t = TREE_CHAIN (t)) add_referenced_var (t); } pop_gimplify_context (NULL); return expr; } /* Invokes force_gimple_operand for EXPR with parameters SIMPLE_P and VAR. If some statements are produced, emits them before BSI. */ tree force_gimple_operand_bsi (block_stmt_iterator *bsi, tree expr, bool simple_p, tree var) { tree stmts; expr = force_gimple_operand (expr, &stmts, simple_p, var); if (stmts) bsi_insert_before (bsi, stmts, BSI_SAME_STMT); return expr; } #include "gt-gimplify.h"
SoftMax.c
#include "../thnets.h"

/* Wire the SoftMax forward pass into a Torch-loaded module. */
int nnload_SoftMax(struct module *mod, struct nnmodule *n)
{
	mod->updateOutput = nn_SoftMax_updateOutput;
	mod->type = MT_SoftMax;
	return 0;
}

/* Wire the SoftMax forward pass into a Python-loaded function. */
void pyload_SoftMax(struct pyfunction *f)
{
	f->module.type = MT_SoftMax;
	f->module.updateOutput = nn_SoftMax_updateOutput;
}

#ifdef ONNX
/* Wire the SoftMax forward pass into an ONNX-loaded module. */
void onnxload_SoftMax(const void *graph, struct module *m, int nodeidx)
{
	m->type = MT_SoftMax;
	m->updateOutput = nn_SoftMax_updateOutput;
}
#endif

/* Forward pass: numerically-stable softmax over the channel dimension.
 * Supported layouts: 1D (C), 2D (N,C), 3D (C,H,W), 4D (N,C,H,W); the
 * softmax is taken over C independently for every (frame, spatial) slot. */
THFloatTensor *nn_SoftMax_updateOutput(struct module *module, THFloatTensor *input)
{
	THFloatTensor *out = module->output;
	float *in_data, *out_data;
	long frames = 0, channels = 0, inner = 0;
	long s;

	switch (input->nDimension) {
	case 1:
		frames = 1;
		channels = input->size[0];
		inner = 1;
		break;
	case 2:
		frames = input->size[0];
		channels = input->size[1];
		inner = 1;
		break;
	case 3:
		frames = 1;
		channels = input->size[0];
		inner = input->size[1] * input->size[2];
		break;
	case 4:
		frames = input->size[0];
		channels = input->size[1];
		inner = input->size[2] * input->size[3];
		break;
	default:
		THError("1D, 2D, 3D or 4D tensor expected");
	}

	THFloatTensor_resizeAs(out, input);
	in_data = THFloatTensor_data(input);
	out_data = THFloatTensor_data(out);

	/* One iteration per (frame, spatial-position) pair; channel elements
	 * for a slot are strided by `inner` within the flat buffer. */
#pragma omp parallel for private(s)
	for (s = 0; s < inner * frames; s++) {
		const float *src = in_data + (s / inner) * channels * inner + s % inner;
		float *dst = out_data + (s / inner) * channels * inner + s % inner;
		float maxval = -THInf;
		float total;
		long d;

		/* Subtract the channel maximum before exponentiating to
		 * avoid overflow. */
		for (d = 0; d < channels; d++)
			if (src[d * inner] >= maxval)
				maxval = src[d * inner];

		total = 0;
		for (d = 0; d < channels; d++) {
			float e = THExpMinusApprox(maxval - src[d * inner]);
			dst[d * inner] = e;
			total += e;
		}

		for (d = 0; d < channels; d++)
			dst[d * inner] *= 1 / total;
	}

	return out;
}
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 32; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=Nt-1;t1++) { lbp=ceild(t1+1,2); ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1-6,8),ceild(8*t2-Nz-19,32));t3<=min(floord(4*Nt+Ny-9,32),floord(4*t1+Ny-1,32));t3++) { for (t4=max(max(ceild(t1-62,64),ceild(8*t2-Nz-243,256)),ceild(32*t3-Ny-243,256));t4<=min(min(floord(4*Nt+Nx-9,256),floord(4*t1+Nx-1,256)),floord(32*t3+Nx+19,256));t4++) { for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(256*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),8*t3+6),64*t4+62);t5++) { for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) { lbv=max(256*t4,4*t5+4); ubv=min(256*t4+255,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ 
(-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
wshfl.c
/* Copyright 2018-2019. Massachusetts Institute of Technology. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2018-2019 Siddharth Iyer <ssi@mit.edu> * * Tamir J, Uecker M, Chen W, Lai P, Alley MT, Vasanawala SS, Lustig M. * T2 shuffling: Sharp, multicontrast, volumetric fast spin‐echo imaging. * Magnetic resonance in medicine. 2017 Jan 1;77(1):180-95. * * B Bilgic, BA Gagoski, SF Cauley, AP Fan, JR Polimeni, PE Grant, * LL Wald, and K Setsompop, Wave-CAIPI for highly accelerated 3D * imaging. Magn Reson Med (2014) doi: 10.1002/mrm.25347 * * Iyer S, Bilgic B, Setsompop K. * Faster T2 shuffling with Wave. * Presented in the session: "Signal Encoding and Decoding" at ISMRM 2018. * https://www.ismrm.org/18/program_files/O67.htm */ #include <stdbool.h> #include <complex.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #endif #include "num/multind.h" #include "num/flpmath.h" #include "num/fft.h" #include "num/init.h" #include "num/iovec.h" #include "num/ops.h" #include "num/ops_p.h" #ifdef USE_CUDA #include "num/gpuops.h" #endif #include "iter/iter.h" #include "iter/lsqr.h" #include "iter/misc.h" #include "linops/linop.h" #include "linops/fmac.h" #include "linops/someops.h" #include "linops/decompose_complex.h" #include "misc/debug.h" #include "misc/mri.h" #include "misc/utils.h" #include "misc/mmio.h" #include "misc/misc.h" #include "misc/opts.h" #include "wavelet/wavthresh.h" #include "lowrank/lrthresh.h" #include "grecon/optreg.h" #include "grecon/italgo.h" static const char help_str[] = "Perform a wave-shuffling reconstruction.\n\n" "Conventions:\n" " * (sx, sy, sz) - Spatial dimensions.\n" " * wx - Extended FOV in READ_DIM due to\n" " wave's voxel spreading.\n" " * (nc, md) - Number of channels and ESPIRiT's \n" " extended-SENSE model operator\n" " dimensions (or # of maps).\n" " * (tf, tk) - Turbo-factor and the rank\n" " of the temporal basis used in\n" " shuffling.\n" 
" * ntr - Number of TRs, or the number of\n" " (ky, kz) points acquired of one\n" " echo image.\n" " * n - Total number of (ky, kz) points\n" " acquired. This is equal to the\n" " product of ntr and tf.\n\n" "Descriptions:\n" " * reorder is an (n by 3) index matrix such that\n" " [ky, kz, t] = reorder(i, :) represents the\n" " (ky, kz) kspace position of the readout line\n" " acquired at echo number (t), and 0 <= ky < sy,\n" " 0 <= kz < sz, 0 <= t < tf).\n" " * table is a (wx by nc by n) matrix such that\n" " table(:, :, k) represents the kth multichannel\n" " kspace line.\n\n" "Expected dimensions:\n" " * maps - ( sx, sy, sz, nc, md, 1, 1)\n" " * wave - ( wx, sy, sz, 1, 1, 1, 1)\n" " * phi - ( 1, 1, 1, 1, 1, tf, tk)\n" " * output - ( sx, sy, sz, 1, md, 1, tk)\n" " * reorder - ( n, 3, 1, 1, 1, 1, 1)\n" " * table - ( wx, nc, n, 1, 1, 1, 1)"; /* Helper function to print out operator dimensions. */ static void print_opdims(const struct linop_s* op) { const struct iovec_s* domain = linop_domain(op); const struct iovec_s* codomain = linop_codomain(op); debug_printf(DP_INFO, "\tDomain: ["); for (long k = 0; k < domain->N; k ++) debug_printf(DP_INFO, "%6ld", domain->dims[k]); debug_printf(DP_INFO, "]\n"); debug_printf(DP_INFO, "\tCodomain: ["); for (long k = 0; k < codomain->N; k ++) debug_printf(DP_INFO, "%6ld", codomain->dims[k]); debug_printf(DP_INFO, "]\n"); } /* Construct sampling mask array from reorder tables. 
*/ static void construct_mask( long reorder_dims[DIMS], complex float* reorder, long mask_dims[DIMS], complex float* mask) { long n = reorder_dims[0]; long sy = mask_dims[1]; long sz = mask_dims[2]; long y = 0; long z = 0; long t = 0; for (int i = 0; i < n; i++) { y = lround(creal(reorder[i])); z = lround(creal(reorder[i + n])); t = lround(creal(reorder[i + 2 * n])); mask[(y + z * sy) + t * sy * sz] = 1; } } struct kern_s { INTERFACE(linop_data_t); unsigned int N; long* reorder_dims; // Dimension of the index table: ( n, 3, 1, 1, 1, 1, 1, 1) long* phi_dims; // Dimension of the temporal basis: ( 1, 1, 1, 1, 1, tf, tk, 1) long* table_dims; // Dimension of the data table: (wx, nc, n, 1, 1, 1, 1, 1) long* kernel_dims; // Dimension of the kernel: ( 1, sy, sz, 1, 1, 1, tk, tk) complex float* reorder; complex float* phi; complex float* kernel; complex float* gpu_kernel; }; static DEF_TYPEID(kern_s); /* Go to table from coefficient-kspace with memory efficiency. */ static void kern_apply(const linop_data_t* _data, complex float* dst, const complex float* src) { const struct kern_s* data = CAST_DOWN(kern_s, _data); long wx = data->table_dims[0]; long sy = data->kernel_dims[1]; long sz = data->kernel_dims[2]; long nc = data->table_dims[1]; long n = data->reorder_dims[0]; long tf = data->phi_dims[5]; long tk = data->phi_dims[6]; long input_dims[] = { [0 ... DIMS - 1] = 1 }; input_dims[0] = wx; input_dims[1] = sy; input_dims[2] = sz; input_dims[3] = nc; input_dims[6] = tk; long perm_dims[] = { [0 ... 
DIMS - 1] = 1 }; perm_dims[0] = wx; perm_dims[1] = nc; perm_dims[3] = tk; perm_dims[4] = sy; perm_dims[5] = sz; complex float* perm = md_alloc_sameplace(DIMS, perm_dims, CFL_SIZE, src); unsigned int permute_order[DIMS] = {0, 3, 5, 6, 1, 2, 4, 7}; for (unsigned int i = 8; i < DIMS; i++) permute_order[i] = i; md_permute(DIMS, permute_order, perm_dims, perm, input_dims, src, CFL_SIZE); long vec_dims[] = {wx, nc, tf, 1}; long phi_mat_dims[] = { 1, 1, tf, tk}; long phi_in_dims[] = {wx, nc, 1, tk}; long fmac_dims[] = {wx, nc, tf, tk}; long line_dims[] = {wx, nc, 1, 1}; complex float* vec = md_alloc_sameplace(4, vec_dims, CFL_SIZE, src); long vec_str[4]; md_calc_strides(4, vec_str, vec_dims, CFL_SIZE); long phi_mat_str[4]; md_calc_strides(4, phi_mat_str, phi_mat_dims, CFL_SIZE); long phi_in_str[4]; md_calc_strides(4, phi_in_str, phi_in_dims, CFL_SIZE); long fmac_str[4]; md_calc_strides(4, fmac_str, fmac_dims, CFL_SIZE); int y = -1; int z = -1; int t = -1; for (int i = 0; i < n; i ++) { y = lround(creal(data->reorder[i])); z = lround(creal(data->reorder[i + n])); t = lround(creal(data->reorder[i + 2 * n])); md_clear(4, vec_dims, vec, CFL_SIZE); md_zfmac2(4, fmac_dims, vec_str, vec, phi_in_str, (perm + ((wx * nc * tk) * (y + z * sy))), phi_mat_str, data->phi); md_copy(4, line_dims, dst + (i * wx * nc), vec + (t * wx * nc), CFL_SIZE); } md_free(perm); md_free(vec); } /* Collapse data table into the temporal basis for memory efficiency. */ static void kern_adjoint(const linop_data_t* _data, complex float* dst, const complex float* src) { struct kern_s* data = CAST_DOWN(kern_s, _data); long wx = data->table_dims[0]; long sy = data->kernel_dims[1]; long sz = data->kernel_dims[2]; long nc = data->table_dims[1]; long n = data->reorder_dims[0]; long tf = data->phi_dims[5]; long tk = data->phi_dims[6]; long perm_dims[] = { [0 ... 
DIMS - 1] = 1 }; perm_dims[0] = wx; perm_dims[1] = nc; perm_dims[3] = tk; perm_dims[4] = sy; perm_dims[5] = sz; complex float* perm = md_alloc_sameplace(DIMS, perm_dims, CFL_SIZE, dst); md_clear(DIMS, perm_dims, perm, CFL_SIZE); #ifdef _OPENMP long num_threads = omp_get_max_threads(); #else long num_threads = 1; #endif long vec_dims[] = {wx, nc, tf, 1}; long phi_mat_dims[] = { 1, 1, tf, tk}; long phi_out_dims[] = {wx, nc, 1, tk}; long fmac_dims[] = {wx, nc, tf, tk}; long line_dims[] = {wx, nc, 1, 1}; long vthrd_dims[] = {wx, nc, tf, 1, num_threads}; complex float* vec = md_alloc_sameplace(5, vthrd_dims, CFL_SIZE, dst); md_clear(5, vthrd_dims, vec, CFL_SIZE); long vec_str[4]; md_calc_strides(4, vec_str, vec_dims, CFL_SIZE); long phi_mat_str[4]; md_calc_strides(4, phi_mat_str, phi_mat_dims, CFL_SIZE); long phi_out_str[4]; md_calc_strides(4, phi_out_str, phi_out_dims, CFL_SIZE); long fmac_str[4]; md_calc_strides(4, fmac_str, fmac_dims, CFL_SIZE); long flag_dims[1] = { n }; complex float* flags = md_calloc(1, flag_dims, CFL_SIZE); #pragma omp parallel for for (int k = 0; k < n; k ++) { #ifdef _OPENMP int tid = omp_get_thread_num(); #else int tid = 0; #endif int y = lround(creal(data->reorder[k])); int z = lround(creal(data->reorder[k + n])); int t = -1; if (0 == flags[k]) { md_clear(4, vec_dims, vec + (wx * nc * tf * tid), CFL_SIZE); for (int i = k; i < n; i ++) { if ((y == lround(creal(data->reorder[i]))) && (z == lround(creal(data->reorder[i + n])))) { flags[i] = 1; t = lround(creal(data->reorder[i + 2 * n])); md_copy(4, line_dims, (vec + (wx * nc * tf * tid) + t * wx * nc), (src + i * wx * nc), CFL_SIZE); } } md_zfmacc2(4, fmac_dims, phi_out_str, perm + (y + z * sy) * (wx * nc * tk), vec_str, vec + (wx * nc * tf * tid), phi_mat_str, data->phi); } } long out_dims[] = { [0 ... 
DIMS - 1] = 1 }; out_dims[0] = wx; out_dims[1] = sy; out_dims[2] = sz; out_dims[3] = nc; out_dims[6] = tk; unsigned int permute_order[DIMS] = {0, 4, 5, 1, 6, 2, 3, 7}; for (unsigned int i = 8; i < DIMS; i++) permute_order[i] = i; md_permute(DIMS, permute_order, out_dims, dst, perm_dims, perm, CFL_SIZE); md_free(vec); md_free(perm); md_free(flags); } static void kern_normal(const linop_data_t* _data, complex float* dst, const complex float* src) { const struct kern_s* data = CAST_DOWN(kern_s, _data); long wx = data->table_dims[0]; long sy = data->kernel_dims[1]; long sz = data->kernel_dims[2]; long nc = data->table_dims[1]; long tk = data->phi_dims[6]; long input_dims[DIMS] = { [0 ... DIMS - 1] = 1 }; input_dims[0] = wx; input_dims[1] = sy; input_dims[2] = sz; input_dims[3] = nc; input_dims[6] = tk; long input_str[DIMS]; md_calc_strides(DIMS, input_str, input_dims, CFL_SIZE); long output_dims[DIMS]; md_copy_dims(DIMS, output_dims, input_dims); output_dims[6] = 1; output_dims[7] = tk; long output_str[DIMS]; md_calc_strides(DIMS, output_str, output_dims, CFL_SIZE); long gpu_kernel_dims[DIMS] = { [0 ... 
DIMS - 1] = 1}; md_copy_dims(DIMS, gpu_kernel_dims, data->kernel_dims); gpu_kernel_dims[0] = wx; gpu_kernel_dims[3] = nc; long kernel_str[DIMS]; md_calc_strides(DIMS, kernel_str, data->kernel_dims, CFL_SIZE); long gpu_kernel_str[DIMS]; md_calc_strides(DIMS, gpu_kernel_str, gpu_kernel_dims, CFL_SIZE); long fmac_dims[DIMS]; md_merge_dims(DIMS, fmac_dims, input_dims, data->kernel_dims); md_clear(DIMS, output_dims, dst, CFL_SIZE); #ifdef USE_CUDA if(cuda_ondevice(src)) md_zfmac2(DIMS, fmac_dims, output_str, dst, input_str, src, gpu_kernel_str, data->gpu_kernel); else #endif md_zfmac2(DIMS, fmac_dims, output_str, dst, input_str, src, kernel_str, data->kernel); } static void kern_free(const linop_data_t* _data) { const struct kern_s* data = CAST_DOWN(kern_s, _data); xfree(data->reorder_dims); xfree(data->phi_dims); xfree(data->table_dims); xfree(data->kernel_dims); #ifdef USE_CUDA if (data->gpu_kernel != NULL) md_free(data->gpu_kernel); #endif xfree(data); } static const struct linop_s* linop_kern_create(bool gpu_flag, const long _reorder_dims[DIMS], complex float* reorder, const long _phi_dims[DIMS], complex float* phi, const long _kernel_dims[DIMS], complex float* kernel, const long _table_dims[DIMS]) { PTR_ALLOC(struct kern_s, data); SET_TYPEID(kern_s, data); PTR_ALLOC(long[DIMS], reorder_dims); PTR_ALLOC(long[DIMS], phi_dims); PTR_ALLOC(long[DIMS], table_dims); PTR_ALLOC(long[DIMS], kernel_dims); md_copy_dims(DIMS, *reorder_dims, _reorder_dims); md_copy_dims(DIMS, *phi_dims, _phi_dims); md_copy_dims(DIMS, *table_dims, _table_dims); md_copy_dims(DIMS, *kernel_dims, _kernel_dims); data->reorder_dims = *PTR_PASS(reorder_dims); data->phi_dims = *PTR_PASS(phi_dims); data->table_dims = *PTR_PASS(table_dims); data->kernel_dims = *PTR_PASS(kernel_dims); data->reorder = reorder; data->phi = phi; data->kernel = kernel; data->gpu_kernel = NULL; #ifdef USE_CUDA if(gpu_flag) { long repmat_kernel_dims[DIMS] = { [0 ... 
DIMS - 1] = 1}; md_copy_dims(DIMS, repmat_kernel_dims, _kernel_dims); repmat_kernel_dims[0] = _table_dims[0]; repmat_kernel_dims[3] = _table_dims[1]; long kernel_strs[DIMS]; long repmat_kernel_strs[DIMS]; md_calc_strides(DIMS, kernel_strs, _kernel_dims, CFL_SIZE); md_calc_strides(DIMS, repmat_kernel_strs, repmat_kernel_dims, CFL_SIZE); complex float* repmat_kernel = md_calloc(DIMS, repmat_kernel_dims, CFL_SIZE); md_copy2(DIMS, repmat_kernel_dims, repmat_kernel_strs, repmat_kernel, kernel_strs, kernel, CFL_SIZE); data->gpu_kernel = md_gpu_move(DIMS, repmat_kernel_dims, repmat_kernel, CFL_SIZE); md_free(repmat_kernel); } #else UNUSED(gpu_flag); #endif long input_dims[DIMS] = { [0 ... DIMS - 1] = 1 }; input_dims[0] = _table_dims[0]; input_dims[1] = _kernel_dims[1]; input_dims[2] = _kernel_dims[2]; input_dims[3] = _table_dims[1]; input_dims[6] = _phi_dims[6]; long output_dims[DIMS] = { [0 ... DIMS - 1] = 1 }; output_dims[0] = _table_dims[0]; output_dims[1] = _table_dims[1]; output_dims[2] = _reorder_dims[0]; const struct linop_s* K = linop_create(DIMS, output_dims, DIMS, input_dims, CAST_UP(PTR_PASS(data)), kern_apply, kern_adjoint, kern_normal, NULL, kern_free); return K; } struct multc_s { INTERFACE(linop_data_t); unsigned int nc; unsigned int md; const complex float* maps; const struct linop_s* sc_op; // Single channel operator. }; static DEF_TYPEID(multc_s); static void multc_apply(const linop_data_t* _data, complex float* dst, const complex float* src) { const struct multc_s* data = CAST_DOWN(multc_s, _data); // Loading single channel operator. const struct operator_s* fwd = data->sc_op->forward; const long* sc_inp_dims = linop_domain(data->sc_op)->dims; const long* sc_out_dims = linop_codomain(data->sc_op)->dims; long sx = sc_inp_dims[0]; long sy = sc_inp_dims[1]; long sz = sc_inp_dims[2]; long wx = sc_out_dims[0]; long n = sc_out_dims[2]; long nc = data->nc; long md = data->md; long src_dims[] = { [0 ... 
DIMS - 1] = 1}; md_copy_dims(DIMS, src_dims, sc_inp_dims); src_dims[MAPS_DIM] = md; long dst_dims[] = { [0 ... DIMS - 1] = 1}; md_copy_dims(DIMS, dst_dims, sc_out_dims); dst_dims[1] = nc; long map_dims[] = { [0 ... DIMS - 1] = 1}; map_dims[0] = sx; map_dims[1] = sy; map_dims[2] = sz; map_dims[3] = nc; map_dims[4] = md; long single_map_dims[] = { [0 ... DIMS - 1] = 1 }; md_copy_dims(DIMS, single_map_dims, map_dims); single_map_dims[COIL_DIM] = 1; complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src); complex float* buffer = md_alloc_sameplace(DIMS, sc_inp_dims, CFL_SIZE, src); long tbl_dims[] = { [0 ... DIMS - 1] = 1}; tbl_dims[0] = wx; tbl_dims[1] = n; tbl_dims[2] = nc; complex float* tbl = md_alloc_sameplace(DIMS, tbl_dims, CFL_SIZE, src); md_clear(DIMS, tbl_dims, tbl, CFL_SIZE); long pos[] = { [0 ... DIMS - 1] = 0 }; long zfmac_dims[] = { [0 ... DIMS - 1] = 1 }; md_copy_dims(DIMS, zfmac_dims, src_dims); long strides_single_map[DIMS]; md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE); long strides_src[DIMS]; md_calc_strides(DIMS, strides_src, src_dims, CFL_SIZE); long strides_sc_inp[DIMS]; md_calc_strides(DIMS, strides_sc_inp, sc_inp_dims, CFL_SIZE); for (long k = 0; k < data->nc; k++) { md_clear(DIMS, single_map_dims, single_map, CFL_SIZE); md_clear(DIMS, sc_inp_dims, buffer, CFL_SIZE); pos[COIL_DIM] = k; md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE); pos[COIL_DIM] = 0; md_zfmac2(DIMS, zfmac_dims, strides_sc_inp, buffer, strides_src, src, strides_single_map, single_map); operator_apply(fwd, DIMS, sc_out_dims, tbl + (wx * n * k), DIMS, sc_inp_dims, buffer); } md_clear(DIMS, dst_dims, dst, CFL_SIZE); unsigned int permute_order[DIMS] = {0, 2, 1}; for (unsigned int i = 3; i < DIMS; i++) permute_order[i] = i; md_permute(DIMS, permute_order, dst_dims, dst, tbl_dims, tbl, CFL_SIZE); md_free(single_map); md_free(buffer); md_free(tbl); } static void multc_adjoint(const linop_data_t* _data, 
complex float* dst, const complex float* src)
{
	// (continued from previous page) — tail of the multi-channel adjoint:
	// applies the single-channel adjoint per coil and accumulates against
	// the conjugated coil sensitivity maps.
	const struct multc_s* data = CAST_DOWN(multc_s, _data);

	// Loading single channel operator.
	const struct operator_s* adj = data->sc_op->adjoint;
	const long* sc_inp_dims = linop_codomain(data->sc_op)->dims;
	const long* sc_out_dims = linop_domain(data->sc_op)->dims;

	long sx = sc_out_dims[0];
	long sy = sc_out_dims[1];
	long sz = sc_out_dims[2];
	long wx = sc_inp_dims[0];
	long n = sc_inp_dims[2];
	long nc = data->nc;		// number of coils
	long md = data->md;		// number of ESPIRiT maps

	// Multi-coil data-table dims: coil index lives in dim 1.
	long src_dims[] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, src_dims, sc_inp_dims);
	src_dims[1] = nc;

	// Coefficient-image dims: map index lives in MAPS_DIM.
	long dst_dims[] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, dst_dims, sc_out_dims);
	dst_dims[MAPS_DIM] = md;

	long map_dims[] = { [0 ... DIMS - 1] = 1};
	map_dims[0] = sx;
	map_dims[1] = sy;
	map_dims[2] = sz;
	map_dims[3] = nc;
	map_dims[4] = md;

	// Dims of one coil's sensitivity map (COIL_DIM collapsed to 1).
	long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, single_map_dims, map_dims);
	single_map_dims[COIL_DIM] = 1;

	// Work buffers allocated on the same device (CPU/GPU) as src.
	complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
	complex float* buffer1 = md_alloc_sameplace(DIMS, sc_out_dims, CFL_SIZE, src);
	complex float* buffer2 = md_alloc_sameplace(DIMS, dst_dims, CFL_SIZE, src);

	long tbl_dims[] = { [0 ... DIMS - 1] = 1};
	tbl_dims[0] = wx;
	tbl_dims[2] = n;
	complex float* tbl = md_alloc_sameplace(DIMS, tbl_dims, CFL_SIZE, src);

	long pos[] = { [0 ... DIMS - 1] = 0 };

	long strides_single_map[DIMS];
	md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);
	long strides_sc_out[DIMS];
	md_calc_strides(DIMS, strides_sc_out, sc_out_dims, CFL_SIZE);
	long strides_dst[DIMS];
	md_calc_strides(DIMS, strides_dst, dst_dims, CFL_SIZE);

	md_clear(DIMS, dst_dims, dst, CFL_SIZE);

	// Accumulate sum_k conj(map_k) * adj(table_k) over coils.
	for (long k = 0; k < data->nc; k++) {

		md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
		md_clear(DIMS, sc_out_dims, buffer1, CFL_SIZE);
		md_clear(DIMS, dst_dims, buffer2, CFL_SIZE);
		md_clear(DIMS, tbl_dims, tbl, CFL_SIZE);

		// Slice out coil k's data table (flag 2 = bitmask for dim 1).
		pos[1] = k;
		md_slice(DIMS, 2, pos, src_dims, tbl, src, CFL_SIZE);
		pos[1] = 0;

		// Single-channel adjoint of coil k's table.
		operator_apply(adj, DIMS, sc_out_dims, buffer1, DIMS, tbl_dims, tbl);

		// Slice out coil k's sensitivity map.
		pos[COIL_DIM] = k;
		md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
		pos[COIL_DIM] = 0;

		// buffer2 += buffer1 * conj(single_map); then fold into dst.
		md_zfmacc2(DIMS, dst_dims, strides_dst, buffer2, strides_sc_out, buffer1, strides_single_map, single_map);
		md_zadd(DIMS, dst_dims, dst, dst, buffer2);
	}

	md_free(single_map);
	md_free(buffer1);
	md_free(buffer2);
	md_free(tbl);
}

/*
 * Normal operator (A^H A) of the multi-channel forward model: for each coil,
 * project coefficients onto that coil's map, apply the single-channel normal
 * operator, and accumulate against the conjugated map.
 */
static void multc_normal(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	const struct multc_s* data = CAST_DOWN(multc_s, _data);

	// Loading single channel operator.
	const struct operator_s* nrm = data->sc_op->normal;
	const long* sc_dims = linop_domain(data->sc_op)->dims;

	long sx = sc_dims[0];
	long sy = sc_dims[1];
	long sz = sc_dims[2];
	long nc = data->nc;
	long md = data->md;

	long dims[] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, dims, sc_dims);
	dims[MAPS_DIM] = md;

	long map_dims[] = { [0 ... DIMS - 1] = 1};
	map_dims[0] = sx;
	map_dims[1] = sy;
	map_dims[2] = sz;
	map_dims[3] = nc;
	map_dims[4] = md;

	long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, single_map_dims, map_dims);
	single_map_dims[COIL_DIM] = 1;

	complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
	complex float* buffer1 = md_alloc_sameplace(DIMS, sc_dims, CFL_SIZE, src);
	complex float* buffer2 = md_alloc_sameplace(DIMS, sc_dims, CFL_SIZE, src);
	complex float* buffer3 = md_alloc_sameplace(DIMS, dims, CFL_SIZE, src);

	long pos[] = { [0 ... DIMS - 1] = 0 };

	long strides_single_map[DIMS];
	md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);
	long strides_sc[DIMS];
	md_calc_strides(DIMS, strides_sc, sc_dims, CFL_SIZE);
	long strides[DIMS];
	md_calc_strides(DIMS, strides, dims, CFL_SIZE);

	md_clear(DIMS, dims, dst, CFL_SIZE);

	for (long k = 0; k < data->nc; k++) {

		md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
		md_clear(DIMS, sc_dims, buffer1, CFL_SIZE);
		md_clear(DIMS, sc_dims, buffer2, CFL_SIZE);
		md_clear(DIMS, dims, buffer3, CFL_SIZE);

		pos[COIL_DIM] = k;
		md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
		pos[COIL_DIM] = 0;

		// buffer1 = src * map_k (collapse MAPS_DIM into single-channel image).
		md_zfmac2(DIMS, dims, strides_sc, buffer1, strides, src, strides_single_map, single_map);
		operator_apply(nrm, DIMS, sc_dims, buffer2, DIMS, sc_dims, buffer1);
		// buffer3 = buffer2 * conj(map_k), broadcast back over MAPS_DIM.
		md_zfmacc2(DIMS, dims, strides, buffer3, strides_sc, buffer2, strides_single_map, single_map);
		md_zadd(DIMS, dims, dst, dst, buffer3);
	}

	md_free(single_map);
	md_free(buffer1);
	md_free(buffer2);
	md_free(buffer3);
}

/* Release the operator's private data (maps/sc_op are not owned here). */
static void multc_free(const linop_data_t* _data)
{
	const struct multc_s* data = CAST_DOWN(multc_s, _data);
	xfree(data);
}

/*
 * Wrap a single-channel operator sc_op into a multi-channel (nc coils,
 * md maps) linear operator using the given sensitivity maps.
 */
static struct linop_s* linop_multc_create(long nc, long md, const complex float* maps, const struct linop_s* sc_op)
{
	PTR_ALLOC(struct multc_s, data);
	SET_TYPEID(multc_s, data);

	data->nc = nc;
	data->md = md;
	data->maps = maps;
	data->sc_op = sc_op;

	long* op_inp_dims = (long*) linop_domain(sc_op)->dims;
	long* op_out_dims = (long*) linop_codomain(sc_op)->dims;

	long input_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, input_dims, op_inp_dims);
	input_dims[MAPS_DIM] = md;

	long output_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, output_dims, op_out_dims);
	output_dims[1] = nc;

	struct linop_s* E = linop_create(DIMS, output_dims, DIMS, input_dims, CAST_UP(PTR_PASS(data)), multc_apply, multc_adjoint, multc_normal, NULL, multc_free);
	return E;
}

/* Resize operator. */
// Zero-pads the readout (dim 0) from sx to the extended wave readout wx.
static const struct linop_s* linop_wavereshape_create(long wx, long sx, long sy, long sz, long nc, long tk)
{
	long input_dims[] = { [0 ... DIMS - 1] = 1};
	input_dims[0] = sx;
	input_dims[1] = sy;
	input_dims[2] = sz;
	input_dims[3] = nc;
	input_dims[6] = tk;

	long output_dims[DIMS];
	md_copy_dims(DIMS, output_dims, input_dims);
	output_dims[0] = wx;

	struct linop_s* R = linop_resize_create(DIMS, output_dims, input_dims);
	return R;
}

/* Fx operator. */
// FFT along the readout; centered variant toggled by caller.
static const struct linop_s* linop_fx_create(long wx, long sy, long sz, long nc, long tk, bool centered)
{
	long dims[] = { [0 ... DIMS - 1] = 1};
	dims[0] = wx;
	dims[1] = sy;
	dims[2] = sz;
	dims[3] = nc;
	dims[6] = tk;

	struct linop_s* Fx = NULL;
	if (centered)
		Fx = linop_fftc_create(DIMS, dims, READ_FLAG);
	else
		Fx = linop_fft_create(DIMS, dims, READ_FLAG);
	return Fx;
}

/* Wave operator. */
// Point-wise multiplication with the wave PSF; broadcasts over COEFF_DIM
// only when the PSF itself carries multiple coefficients (psf_tk > 1).
static const struct linop_s* linop_wave_create(long wx, long sy, long sz, long nc, long tk, long psf_tk, complex float* psf)
{
	long dims[] = { [0 ... DIMS - 1] = 1};
	dims[0] = wx;
	dims[1] = sy;
	dims[2] = sz;
	dims[3] = nc;
	dims[6] = tk;

	return (psf_tk > 1) ? linop_cdiag_create(DIMS, dims, FFT_FLAGS | COEFF_FLAG, psf) : linop_cdiag_create(DIMS, dims, FFT_FLAGS, psf);
}

/* Fyz operator. */
// FFT along both phase-encode directions.
static const struct linop_s* linop_fyz_create(long wx, long sy, long sz, long nc, long tk, bool centered)
{
	long dims[] = { [0 ... DIMS - 1] = 1};
	dims[0] = wx;
	dims[1] = sy;
	dims[2] = sz;
	dims[3] = nc;
	dims[6] = tk;

	struct linop_s* Fyz = NULL;
	if (centered)
		Fyz = linop_fftc_create(DIMS, dims, PHS1_FLAG|PHS2_FLAG);
	else
		Fyz = linop_fft_create(DIMS, dims, PHS1_FLAG|PHS2_FLAG);
	return Fyz;
}

/* Construction sampling temporal kernel.*/
// For every (y,z) location, builds the tk x tk matrix phi^H diag(mask_yz) phi
// one column at a time (unit vector cvec), then permutes the result into
// the caller's kernel layout.
static void construct_kernel( long mask_dims[DIMS], complex float* mask, long phi_dims[DIMS], complex float* phi, long kern_dims[DIMS], complex float* kern)
{
	long sy = mask_dims[1];
	long sz = mask_dims[2];
	long tf = phi_dims[5];	// number of echoes/time frames
	long tk = phi_dims[6];	// number of temporal basis coefficients

	long cvec_dims[] = { [0 ... DIMS - 1] = 1 };
	cvec_dims[6] = tk;
	long cvec_str[DIMS];
	md_calc_strides(DIMS, cvec_str, cvec_dims, CFL_SIZE);
	complex float cvec[tk];

	long tvec_dims[] = { [0 ... DIMS - 1] = 1 };
	tvec_dims[5] = tf;
	long tvec_str[DIMS];
	md_calc_strides(DIMS, tvec_str, tvec_dims, CFL_SIZE);
	complex float mvec[tf];
	complex float tvec1[tf];
	complex float tvec2[tf];

	long phi_str[DIMS];
	md_calc_strides(DIMS, phi_str, phi_dims, CFL_SIZE);

	long out_dims[] = { [0 ... DIMS - 1] = 1 };
	out_dims[0] = tk;
	out_dims[1] = sy;
	out_dims[2] = sz;
	out_dims[3] = tk;
	complex float* out = md_calloc(DIMS, out_dims, CFL_SIZE);

	for (int y = 0; y < sy; y ++) {
	for (int z = 0; z < sz; z ++) {

		// Sampling pattern across time at this (y,z).
		for (int t = 0; t < tf; t ++)
			mvec[t] = mask[(y + sy * z) + (sy * sz) * t];

		// Column t of the kernel: phi^H diag(mvec) phi e_t.
		for (int t = 0; t < tk; t ++) {
			cvec[t] = 1;
			md_clear(DIMS, tvec_dims, tvec1, CFL_SIZE);
			md_zfmac2(DIMS, phi_dims, tvec_str, tvec1, cvec_str, cvec, phi_str, phi);
			md_clear(DIMS, tvec_dims, tvec2, CFL_SIZE);
			md_zfmac2(DIMS, tvec_dims, tvec_str, tvec2, tvec_str, tvec1, tvec_str, mvec);
			md_clear(DIMS, cvec_dims, out + y * tk + z * sy * tk + t * sy * sz * tk, CFL_SIZE);
			md_zfmacc2(DIMS, phi_dims, cvec_str, out + y * tk + z * sy * tk + t * sy * sz * tk, tvec_str, tvec2, phi_str, phi);
			cvec[t] = 0;
		}
	}
	}

	// Reorder (col, y, z, row) into the caller's kernel dimension layout.
	unsigned int permute_order[DIMS] = {4, 1, 2, 5, 6, 7, 3, 0};
	for (unsigned int i = 8; i < DIMS; i++)
		permute_order[i] = i;
	md_permute(DIMS, permute_order, kern_dims, kern, out_dims, out, CFL_SIZE);
	md_free(out);
}

/*
 * Applies fftmod to the table and maps, plus a per-acquisition phase ramp
 * compensating the (y,z) position of each reordered readout line.
 */
static void fftmod_apply(long sy, long sz, long reorder_dims[DIMS], complex float* reorder, long table_dims[DIMS], complex float* table, long maps_dims[DIMS], complex float* maps)
{
	long wx = table_dims[0];
	long nc = table_dims[1];
	fftmod(DIMS, table_dims, READ_FLAG, table, table);
	fftmod(DIMS, maps_dims, FFT_FLAGS, maps, maps);

	long y = -1;
	long z = -1;
	double dy = ((double) sy/2)/((double) sy);
	double dz = ((double) sz/2)/((double) sz);
	complex float py = 1;
	complex float pz = 1;

	// NOTE(review): "[0 ... DIMS]" allocates DIMS + 1 entries; every other
	// dims array in this file uses "[0 ... DIMS - 1]". Harmless here since
	// only the first DIMS entries are read, but likely an off-by-one typo.
	long dims[] = { [0 ... DIMS] = 1};
	dims[0] = wx;
	dims[1] = nc;

	long n = reorder_dims[0];
	for (long k = 0; k < n; k++) {
		// reorder stores y coordinates in [0, n) and z in [n, 2n).
		y = lround(creal(reorder[k]));
		z = lround(creal(reorder[k + n]));
		py = cexp(2.i * M_PI * dy * y);
		pz = cexp(2.i * M_PI * dz * z);
		md_zsmul(DIMS, dims, table + k * wx * nc, table + k * wx * nc, py * pz);
	}
}

/*
 * Main entry point for the wave-shuffling (wshfl) reconstruction tool.
 * Loads maps/wave/phi/reorder/table, builds the forward model
 * K . Fyz . W . Fx . R lifted to multiple coils, and solves the
 * regularized least-squares problem with FISTA or ADMM.
 */
int main_wshfl(int argc, char* argv[argc])
{
	double start_time = timestamp();

	const char* maps_file = NULL;
	const char* wave_file = NULL;
	const char* phi_file = NULL;
	const char* reorder_file = NULL;
	const char* table_file = NULL;
	const char* out_file = NULL;

	struct arg_s args[] = {
		ARG_INFILE(true, &maps_file, "maps"),
		ARG_INFILE(true, &wave_file, "wave"),
		ARG_INFILE(true, &phi_file, "phi"),
		ARG_INFILE(true, &reorder_file, "reorder"),
		ARG_INFILE(true, &table_file, "table"),
		ARG_OUTFILE(true, &out_file, "output"),
	};

	struct opt_reg_s ropts;
	opt_reg_init(&ropts);

	// Command-line defaults.
	int maxiter = 30;
	int cgiter = 10;
	int blksize = 8;
	float rho = 1;
	float eval = -1;	// < 0 means: estimate max eigenvalue automatically
	float tol = 1E-3;
	bool hgwld = false;
	bool ksp = false;
	const char* fwd = NULL;
	const char* x0 = NULL;
	bool use_gpu = false;
	bool dcx = false;

	const struct opt_s opts[] = {
		{ 'R', NULL, true, OPT_SPECIAL, opt_reg, &ropts, "<T>:A:B:C", "Generalized regularization options. (-Rh for help)" },
		OPT_INT( 'b', &blksize, "blkdim", "Block size for locally low rank."),
		OPT_INT( 'i', &maxiter, "mxiter", "Maximum number of iterations."),
		OPT_INT( 'j', &cgiter, "cgiter", "Maximum number of CG iterations in ADMM."),
		OPT_FLOAT( 's', &rho, "admrho", "ADMM Rho value."),
		OPT_FLOAT( 'e', &eval, "eigval", "Eigenvalue to scale step size. (Optional.)"),
		OPT_INFILE( 'F', &fwd, "frwrd", "Go from shfl-coeffs to data-table. Pass in coeffs path."),
		OPT_INFILE( 'O', &x0, "initl", "Initialize reconstruction with guess."),
		OPT_FLOAT( 't', &tol, "toler", "Tolerance convergence condition for FISTA."),
		OPT_SET( 'g', &use_gpu, "Use GPU."),
		OPT_SET( 'K', &ksp, "Go from data-table to shuffling basis k-space."),
		OPT_SET( 'H', &hgwld, "Use hogwild."),
		OPT_SET( 'v', &dcx, "Split coefficients to real and imaginary components."),
	};

	cmdline(&argc, argv, ARRAY_SIZE(args), args, help_str, ARRAY_SIZE(opts), opts);

	struct admm_conf admm = { false, false, false, rho, cgiter };

	debug_printf(DP_INFO, "Loading data... ");
	long maps_dims[DIMS];
	complex float* maps = load_cfl(maps_file, DIMS, maps_dims);
	long wave_dims[DIMS];
	complex float* wave = load_cfl(wave_file, DIMS, wave_dims);
	long phi_dims[DIMS];
	complex float* phi = load_cfl(phi_file, DIMS, phi_dims);
	long reorder_dims[DIMS];
	complex float* reorder = load_cfl(reorder_file, DIMS, reorder_dims);
	long table_dims[DIMS];
	complex float* table = load_cfl(table_file, DIMS, table_dims);
	debug_printf(DP_INFO, "Done.\n");

	(use_gpu ? num_init_gpu : num_init)();

	// Problem sizes read off the inputs.
	int wx = wave_dims[0];
	int sx = maps_dims[0];
	int sy = maps_dims[1];
	int sz = maps_dims[2];
	int nc = maps_dims[3];
	int md = maps_dims[4];
	int tf = phi_dims[5];
	int tk = phi_dims[6];

	debug_printf(DP_INFO, "Constructing sampling mask from reorder table... ");
	long mask_dims[] = { [0 ... DIMS - 1] = 1 };
	mask_dims[1] = sy;
	mask_dims[2] = sz;
	mask_dims[5] = tf;
	complex float* mask = md_calloc(DIMS, mask_dims, CFL_SIZE);
	construct_mask(reorder_dims, reorder, mask_dims, mask);
	debug_printf(DP_INFO, "Done.\n");

	debug_printf(DP_INFO, "Constructing sampling-temporal kernel... ");
	long kernel_dims[] = { [0 ... DIMS - 1] = 1 };
	kernel_dims[1] = sy;
	kernel_dims[2] = sz;
	kernel_dims[6] = tk;
	kernel_dims[7] = tk;
	complex float* kernel = md_calloc(DIMS, kernel_dims, CFL_SIZE);
	construct_kernel(mask_dims, mask, phi_dims, phi, kernel_dims, kernel);
	md_free(mask);
	debug_printf(DP_INFO, "Done.\n");

	long coeff_dims[] = { [0 ... DIMS - 1] = 1 };
	coeff_dims[0] = sx;
	coeff_dims[1] = sy;
	coeff_dims[2] = sz;
	coeff_dims[4] = md;
	coeff_dims[6] = tk;
	coeff_dims[8] = dcx ? 2 : 1;

	// -K: only map the data table into shuffling-basis k-space and exit.
	if (ksp == true) {
		const struct linop_s* Knc = linop_kern_create(use_gpu, reorder_dims, reorder, phi_dims, phi, kernel_dims, kernel, table_dims);
		long ksp_dims[] = { [0 ... DIMS - 1] = 1 };
		ksp_dims[0] = wx;
		ksp_dims[1] = sy;
		ksp_dims[2] = sz;
		ksp_dims[3] = nc;
		ksp_dims[6] = tk;
		complex float* res = create_cfl(out_file, DIMS, ksp_dims);
		operator_apply(Knc->adjoint, DIMS, ksp_dims, res, DIMS, table_dims, table);
		linop_free(Knc);
		md_free(kernel);
		unmap_cfl(DIMS, maps_dims, maps);
		unmap_cfl(DIMS, wave_dims, wave);
		unmap_cfl(DIMS, phi_dims, phi);
		unmap_cfl(DIMS, reorder_dims, reorder);
		unmap_cfl(DIMS, table_dims, table);
		unmap_cfl(DIMS, ksp_dims, res);
		return 0;
	}

	debug_printf(DP_INFO, "Creating single channel linear operators:\n");
	double t1;
	double t2;

	t1 = timestamp();
	const struct linop_s* R = linop_wavereshape_create(wx, sx, sy, sz, 1, tk);
	t2 = timestamp();
	debug_printf(DP_INFO, "\tR: %f seconds.\n", t2 - t1);

	t1 = timestamp();
	const struct linop_s* Fx = linop_fx_create(wx, sy, sz, 1, tk, false);
	t2 = timestamp();
	debug_printf(DP_INFO, "\tFx: %f seconds.\n", t2 - t1);

	t1 = timestamp();
	const struct linop_s* W = linop_wave_create(wx, sy, sz, 1, tk, wave_dims[COEFF_DIM], wave);
	t2 = timestamp();
	debug_printf(DP_INFO, "\tW: %f seconds.\n", t2 - t1);

	t1 = timestamp();
	const struct linop_s* Fyz = linop_fyz_create(wx, sy, sz, 1, tk, false);
	t2 = timestamp();
	debug_printf(DP_INFO, "\tFyz: %f seconds.\n", t2 - t1);

	t1 = timestamp();
	long single_channel_table_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, single_channel_table_dims, table_dims);
	single_channel_table_dims[1] = 1;
	const struct linop_s* K = linop_kern_create(use_gpu, reorder_dims, reorder, phi_dims, phi, kernel_dims, kernel, single_channel_table_dims);
	t2 = timestamp();
	debug_printf(DP_INFO, "\tK: %f seconds.\n", t2 - t1);

	// Single-channel forward model: K . Fyz . W . Fx . R.
	struct linop_s* A_sc = linop_chain_FF(linop_chain_FF(linop_chain_FF(linop_chain_FF( R, Fx), W), Fyz), K);
	debug_printf(DP_INFO, "Single channel forward operator information:\n");
	print_opdims(A_sc);

	struct linop_s* A = linop_multc_create(nc, md, maps, A_sc);
	debug_printf(DP_INFO, "Overall forward linear operator information:\n");
	print_opdims(A);

	// -F: apply the (centered-FFT) forward model to given coefficients and exit.
	if (fwd != NULL) {
		debug_printf(DP_INFO, "Going from coefficients to data table... ");
		complex float* coeffs_to_fwd = load_cfl(fwd, DIMS, coeff_dims);
		complex float* table_forward = create_cfl(out_file, DIMS, table_dims);
		const struct linop_s* R = linop_wavereshape_create(wx, sx, sy, sz, 1, tk);
		const struct linop_s* CFx = linop_fx_create( wx, sy, sz, 1, tk, true);
		const struct linop_s* W = linop_wave_create(wx, sy, sz, 1, tk, wave_dims[COEFF_DIM], wave);
		const struct linop_s* CFyz = linop_fyz_create(wx, sy, sz, 1, tk, true);
		const struct linop_s* K = linop_kern_create(use_gpu, reorder_dims, reorder, phi_dims, phi, kernel_dims, kernel, single_channel_table_dims);
		struct linop_s* AC_sc = linop_chain_FF(linop_chain_FF(linop_chain_FF(linop_chain_FF( R, CFx), W), CFyz), K);
		struct linop_s* AC = linop_multc_create(nc, md, maps, AC_sc);
		operator_apply(AC->forward, DIMS, table_dims, table_forward, DIMS, coeff_dims, coeffs_to_fwd);
		debug_printf(DP_INFO, "Done.\n");
		debug_printf(DP_INFO, "Cleaning up... ");
		linop_free(AC);
		linop_free(AC_sc);
		md_free(kernel);
		unmap_cfl(DIMS, maps_dims, maps);
		unmap_cfl(DIMS, wave_dims, wave);
		unmap_cfl(DIMS, phi_dims, phi);
		unmap_cfl(DIMS, reorder_dims, reorder);
		unmap_cfl(DIMS, table_dims, table);
		unmap_cfl(DIMS, table_dims, table_forward);
		debug_printf(DP_INFO, "Done.\n");
		return 0;
	}

	if (dcx) {
		debug_printf(DP_INFO, "\tSplitting result into real and imaginary components.\n");
		struct linop_s* tmp = A;
		struct linop_s* dcxop = linop_decompose_complex_create(DIMS, ITER_DIM, linop_domain(A)->dims);
		A = linop_chain(dcxop, tmp);
		debug_printf(DP_INFO, "New operator information:\n");
		print_opdims(A);
		linop_free(dcxop);
		linop_free(tmp);
	}

	debug_printf(DP_INFO, "Normalizing data table and applying fftmod to table and maps... ");
	// NOTE(review): no guard against norm == 0 (empty table) before dividing.
	float norm = md_znorm(DIMS, table_dims, table);
	md_zsmul(DIMS, table_dims, table, table, 1. / norm);
	fftmod_apply(sy, sz, reorder_dims, reorder, table_dims, table, maps_dims, maps);
	debug_printf(DP_INFO, "Done.\n");

	debug_printf(DP_INFO, "Preparing reconstruction operator: ");
	const struct operator_p_s* thresh_ops[NUM_REGS] = { NULL };
	const struct linop_s* trafos[NUM_REGS] = { NULL };
	opt_reg_configure(DIMS, coeff_dims, &ropts, thresh_ops, trafos, blksize, 1, use_gpu);
	int nr_penalties = ropts.r;
	struct reg_s* regs = ropts.regs;
	// Single penalty -> FISTA; multiple penalties -> ADMM.
	bool fista = (nr_penalties == 1);

	// FISTA variables.
	float step = 0.5;
	italgo_fun2_t italgo = iter2_call_iter;
	struct iter_call_s iter2_data;
	SET_TYPEID(iter_call_s, &iter2_data);
	iter_conf* iconf = CAST_UP(&iter2_data);
	struct iter_fista_conf fsconf = iter_fista_defaults;

	// ADMM variables.
	struct iter it;

	if (fista) {
		if (eval < 0) {
#ifdef USE_CUDA
			eval = use_gpu ? estimate_maxeigenval_gpu(A_sc->normal) : estimate_maxeigenval(A_sc->normal);
#else
			eval = estimate_maxeigenval(A_sc->normal);
#endif
		}
		step /= eval;
		debug_printf(DP_INFO, "\tAlgorithm: FISTA.\n");
		debug_printf(DP_INFO, "\tMax eigenvalue: %.2e\n", eval);
		debug_printf(DP_INFO, "\tStep: %.2e\n", step);
		debug_printf(DP_INFO, "\tTolerance: %.2e\n", tol);
		fsconf.maxiter = maxiter;
		fsconf.step = step;
		fsconf.hogwild = hgwld;
		fsconf.tol = tol;
		iter2_data.fun = iter_fista;
		iter2_data._conf = CAST_UP(&fsconf);
	} else {
		// NOTE(review): "\n." in the next two messages puts the period after
		// the newline; likely meant ".\n".
		debug_printf(DP_INFO, "\tAlgorithm: ADMM\n.");
		debug_printf(DP_INFO, "\tRho: %.2e\n.", rho);
		it = italgo_config(ALGO_ADMM, nr_penalties, regs, maxiter, step, hgwld, false, admm, 1, false);
	}

	complex float* init = NULL;
	if (x0 != NULL) {
		debug_printf(DP_INFO, "Loading in initial guess... ");
		init = load_cfl(x0, DIMS, coeff_dims);
		debug_printf(DP_INFO, "Done.\n");
	}

	debug_printf(DP_INFO, "Reconstruction... ");
	complex float* recon = create_cfl(out_file, DIMS, coeff_dims);
	struct lsqr_conf lsqr_conf = lsqr_defaults;
	lsqr_conf.lambda = 0.;
	lsqr_conf.it_gpu = use_gpu;

	double recon_start = timestamp();
	const struct operator_p_s* J = fista ?
		lsqr2_create(&lsqr_conf, italgo, iconf, (const float*) init, A, NULL, nr_penalties, thresh_ops, NULL, NULL):
		lsqr2_create(&lsqr_conf, it.italgo, it.iconf, (const float*) init, A, NULL, nr_penalties, thresh_ops, trafos, NULL);
	operator_p_apply(J, 1., DIMS, coeff_dims, recon, DIMS, table_dims, table);
	// Undo the earlier table normalization so coefficients are in input scale.
	md_zsmul(DIMS, coeff_dims, recon, recon, norm);
	double recon_end = timestamp();
	debug_printf(DP_INFO, "Done.\nReconstruction time: %f seconds.\n", recon_end - recon_start);

	debug_printf(DP_INFO, "Cleaning up and saving result... ");
	operator_p_free(J);
	// NOTE(review): on the FISTA path 'it' was never assigned by
	// italgo_config(); freeing it here reads an uninitialized struct.
	italgo_config_free(it);
	linop_free(A);
	linop_free(A_sc);
	md_free(kernel);
	unmap_cfl(DIMS, maps_dims, maps);
	unmap_cfl(DIMS, wave_dims, wave);
	unmap_cfl(DIMS, phi_dims, phi);
	unmap_cfl(DIMS, reorder_dims, reorder);
	unmap_cfl(DIMS, table_dims, table);
	unmap_cfl(DIMS, coeff_dims, recon);
	if (x0 != NULL)
		unmap_cfl(DIMS, coeff_dims, init);
	debug_printf(DP_INFO, "Done.\n");

	double end_time = timestamp();
	debug_printf(DP_INFO, "Total time: %f seconds.\n", end_time - start_time);
	return 0;
}
/* ==== file: 1d.pluto-par.512.c ==== */
#include <stdio.h> #include <sys/time.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define T TVAL #define N NVAL #define coeff1 0.5 #define coeff2 0.7 double h[N]; double e[N+1]; void init_arrays() { int i1; for (i1=0; i1<N; i1++) h[i1] = (i1) % 5 + 1; for (i1=0; i1<N+1; i1++) e[i1] = (i1) % 5 + 1; } double rtclock() { struct timezone tzp; struct timeval tp; int stat; gettimeofday (&tp, &tzp); return (tp.tv_sec + tp.tv_usec*1.0e-6); } int main() { init_arrays(); double annot_t_start=0, annot_t_end=0, annot_t_total=0; int annot_i; for (annot_i=0; annot_i<REPS; annot_i++) { annot_t_start = rtclock(); int t, i, j, k, l,ii; #define S1(zT0,zT1,t,i) {e[i]=e[i]-coeff1*(h[i]-h[i-1]);} #define S2(zT0,zT1,t,i) {h[i]=h[i]-coeff2*(e[1+i]-e[i]);} int c1, c2, c3, c4, c5; register int lb, ub, lb1, ub1, lb2, ub2; register int lbv, ubv; for (c1=-1;c1<=floord(N+2*T,512);c1++) { lb1=max(max(0,ceild(256*c1-255,512)),ceild(512*c1-T,512)); ub1=min(min(floord(256*c1+255,256),floord(512*c1+N+511,1024)),floord(N+T,512)); #pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5) for (c2=lb1; c2<=ub1; c2++) { if ((c1 <= floord(1024*c2-N,512)) && (c2 >= ceild(N+1,512))) { S2(c1-c2,-c1+2*c2,512*c2-N,N-1) ; } for (c3=max(max(512*c2-N+1,512*c1-512*c2),1);c3<=min(min(512*c1-512*c2+511,512*c2-N+511),T);c3++) { for (c4=max(512*c2,c3+1);c4<=c3+N-1;c4++) { S1(c1-c2,-c1+2*c2,c3,-c3+c4) ; S2(c1-c2,-c1+2*c2,c3,-c3+c4-1) ; } S2(c1-c2,-c1+2*c2,c3,N-1) ; } /*@ begin Loop( transform Composite( tile = [('c3',T1,'ii')], unrolljam = [('c3',U1),('c4',U2)], vector = (VEC, ['ivdep','vector always']) ) for (c3=max(max(1,512*c1-512*c2),512*c2-N+512);c3<=min(min(512*c1-512*c2+511,T),512*c2+510);c3++) for (c4=max(512*c2,c3+1);c4<=512*c2+511;c4++) { S1(c1-c2,-c1+2*c2,c3,-c3+c4) ; S2(c1-c2,-c1+2*c2,c3,-c3+c4-1) ; } ) @*/{ for 
(c3=max(max(1,512*c1-512*c2),512*c2-N+512); c3<=min(min(512*c1-512*c2+511,T),512*c2+510)-7; c3=c3+8) { for (c4=max(512*c2,c3+1); c4<=512*c2+504; c4=c4+8) { S1(c1-c2,-c1+2*c2,c3,-c3+c4); S1(c1-c2,-c1+2*c2,c3,-c3+c4+1); S1(c1-c2,-c1+2*c2,c3,-c3+c4+2); S1(c1-c2,-c1+2*c2,c3,-c3+c4+3); S1(c1-c2,-c1+2*c2,c3,-c3+c4+4); S1(c1-c2,-c1+2*c2,c3,-c3+c4+5); S1(c1-c2,-c1+2*c2,c3,-c3+c4+6); S1(c1-c2,-c1+2*c2,c3,-c3+c4+7); S2(c1-c2,-c1+2*c2,c3,-c3+c4-1); S2(c1-c2,-c1+2*c2,c3,-c3+c4); S2(c1-c2,-c1+2*c2,c3,-c3+c4+1); S2(c1-c2,-c1+2*c2,c3,-c3+c4+2); S2(c1-c2,-c1+2*c2,c3,-c3+c4+3); S2(c1-c2,-c1+2*c2,c3,-c3+c4+4); S2(c1-c2,-c1+2*c2,c3,-c3+c4+5); S2(c1-c2,-c1+2*c2,c3,-c3+c4+6); } for (; c4<=512*c2+511; c4=c4+1) { S1(c1-c2,-c1+2*c2,c3,-c3+c4); S2(c1-c2,-c1+2*c2,c3,-c3+c4-1); } for (c4=max(512*c2,c3+2); c4<=512*c2+504; c4=c4+8) { S1(c1-c2,-c1+2*c2,(c3+1),-(c3+1)+c4); S1(c1-c2,-c1+2*c2,(c3+1),-(c3+1)+c4+1); S1(c1-c2,-c1+2*c2,(c3+1),-(c3+1)+c4+2); S1(c1-c2,-c1+2*c2,(c3+1),-(c3+1)+c4+3); S1(c1-c2,-c1+2*c2,(c3+1),-(c3+1)+c4+4); S1(c1-c2,-c1+2*c2,(c3+1),-(c3+1)+c4+5); S1(c1-c2,-c1+2*c2,(c3+1),-(c3+1)+c4+6); S1(c1-c2,-c1+2*c2,(c3+1),-(c3+1)+c4+7); S2(c1-c2,-c1+2*c2,(c3+1),-(c3+1)+c4-1); S2(c1-c2,-c1+2*c2,(c3+1),-(c3+1)+c4); S2(c1-c2,-c1+2*c2,(c3+1),-(c3+1)+c4+1); S2(c1-c2,-c1+2*c2,(c3+1),-(c3+1)+c4+2); S2(c1-c2,-c1+2*c2,(c3+1),-(c3+1)+c4+3); S2(c1-c2,-c1+2*c2,(c3+1),-(c3+1)+c4+4); S2(c1-c2,-c1+2*c2,(c3+1),-(c3+1)+c4+5); S2(c1-c2,-c1+2*c2,(c3+1),-(c3+1)+c4+6); } for (; c4<=512*c2+511; c4=c4+1) { S1(c1-c2,-c1+2*c2,(c3+1),-(c3+1)+c4); S2(c1-c2,-c1+2*c2,(c3+1),-(c3+1)+c4-1); } for (c4=max(512*c2,c3+3); c4<=512*c2+504; c4=c4+8) { S1(c1-c2,-c1+2*c2,(c3+2),-(c3+2)+c4); S1(c1-c2,-c1+2*c2,(c3+2),-(c3+2)+c4+1); S1(c1-c2,-c1+2*c2,(c3+2),-(c3+2)+c4+2); S1(c1-c2,-c1+2*c2,(c3+2),-(c3+2)+c4+3); S1(c1-c2,-c1+2*c2,(c3+2),-(c3+2)+c4+4); S1(c1-c2,-c1+2*c2,(c3+2),-(c3+2)+c4+5); S1(c1-c2,-c1+2*c2,(c3+2),-(c3+2)+c4+6); S1(c1-c2,-c1+2*c2,(c3+2),-(c3+2)+c4+7); S2(c1-c2,-c1+2*c2,(c3+2),-(c3+2)+c4-1); 
S2(c1-c2,-c1+2*c2,(c3+2),-(c3+2)+c4); S2(c1-c2,-c1+2*c2,(c3+2),-(c3+2)+c4+1); S2(c1-c2,-c1+2*c2,(c3+2),-(c3+2)+c4+2); S2(c1-c2,-c1+2*c2,(c3+2),-(c3+2)+c4+3); S2(c1-c2,-c1+2*c2,(c3+2),-(c3+2)+c4+4); S2(c1-c2,-c1+2*c2,(c3+2),-(c3+2)+c4+5); S2(c1-c2,-c1+2*c2,(c3+2),-(c3+2)+c4+6); } for (; c4<=512*c2+511; c4=c4+1) { S1(c1-c2,-c1+2*c2,(c3+2),-(c3+2)+c4); S2(c1-c2,-c1+2*c2,(c3+2),-(c3+2)+c4-1); } for (c4=max(512*c2,c3+4); c4<=512*c2+504; c4=c4+8) { S1(c1-c2,-c1+2*c2,(c3+3),-(c3+3)+c4); S1(c1-c2,-c1+2*c2,(c3+3),-(c3+3)+c4+1); S1(c1-c2,-c1+2*c2,(c3+3),-(c3+3)+c4+2); S1(c1-c2,-c1+2*c2,(c3+3),-(c3+3)+c4+3); S1(c1-c2,-c1+2*c2,(c3+3),-(c3+3)+c4+4); S1(c1-c2,-c1+2*c2,(c3+3),-(c3+3)+c4+5); S1(c1-c2,-c1+2*c2,(c3+3),-(c3+3)+c4+6); S1(c1-c2,-c1+2*c2,(c3+3),-(c3+3)+c4+7); S2(c1-c2,-c1+2*c2,(c3+3),-(c3+3)+c4-1); S2(c1-c2,-c1+2*c2,(c3+3),-(c3+3)+c4); S2(c1-c2,-c1+2*c2,(c3+3),-(c3+3)+c4+1); S2(c1-c2,-c1+2*c2,(c3+3),-(c3+3)+c4+2); S2(c1-c2,-c1+2*c2,(c3+3),-(c3+3)+c4+3); S2(c1-c2,-c1+2*c2,(c3+3),-(c3+3)+c4+4); S2(c1-c2,-c1+2*c2,(c3+3),-(c3+3)+c4+5); S2(c1-c2,-c1+2*c2,(c3+3),-(c3+3)+c4+6); } for (; c4<=512*c2+511; c4=c4+1) { S1(c1-c2,-c1+2*c2,(c3+3),-(c3+3)+c4); S2(c1-c2,-c1+2*c2,(c3+3),-(c3+3)+c4-1); } for (c4=max(512*c2,c3+5); c4<=512*c2+504; c4=c4+8) { S1(c1-c2,-c1+2*c2,(c3+4),-(c3+4)+c4); S1(c1-c2,-c1+2*c2,(c3+4),-(c3+4)+c4+1); S1(c1-c2,-c1+2*c2,(c3+4),-(c3+4)+c4+2); S1(c1-c2,-c1+2*c2,(c3+4),-(c3+4)+c4+3); S1(c1-c2,-c1+2*c2,(c3+4),-(c3+4)+c4+4); S1(c1-c2,-c1+2*c2,(c3+4),-(c3+4)+c4+5); S1(c1-c2,-c1+2*c2,(c3+4),-(c3+4)+c4+6); S1(c1-c2,-c1+2*c2,(c3+4),-(c3+4)+c4+7); S2(c1-c2,-c1+2*c2,(c3+4),-(c3+4)+c4-1); S2(c1-c2,-c1+2*c2,(c3+4),-(c3+4)+c4); S2(c1-c2,-c1+2*c2,(c3+4),-(c3+4)+c4+1); S2(c1-c2,-c1+2*c2,(c3+4),-(c3+4)+c4+2); S2(c1-c2,-c1+2*c2,(c3+4),-(c3+4)+c4+3); S2(c1-c2,-c1+2*c2,(c3+4),-(c3+4)+c4+4); S2(c1-c2,-c1+2*c2,(c3+4),-(c3+4)+c4+5); S2(c1-c2,-c1+2*c2,(c3+4),-(c3+4)+c4+6); } for (; c4<=512*c2+511; c4=c4+1) { S1(c1-c2,-c1+2*c2,(c3+4),-(c3+4)+c4); 
S2(c1-c2,-c1+2*c2,(c3+4),-(c3+4)+c4-1); } for (c4=max(512*c2,c3+6); c4<=512*c2+504; c4=c4+8) { S1(c1-c2,-c1+2*c2,(c3+5),-(c3+5)+c4); S1(c1-c2,-c1+2*c2,(c3+5),-(c3+5)+c4+1); S1(c1-c2,-c1+2*c2,(c3+5),-(c3+5)+c4+2); S1(c1-c2,-c1+2*c2,(c3+5),-(c3+5)+c4+3); S1(c1-c2,-c1+2*c2,(c3+5),-(c3+5)+c4+4); S1(c1-c2,-c1+2*c2,(c3+5),-(c3+5)+c4+5); S1(c1-c2,-c1+2*c2,(c3+5),-(c3+5)+c4+6); S1(c1-c2,-c1+2*c2,(c3+5),-(c3+5)+c4+7); S2(c1-c2,-c1+2*c2,(c3+5),-(c3+5)+c4-1); S2(c1-c2,-c1+2*c2,(c3+5),-(c3+5)+c4); S2(c1-c2,-c1+2*c2,(c3+5),-(c3+5)+c4+1); S2(c1-c2,-c1+2*c2,(c3+5),-(c3+5)+c4+2); S2(c1-c2,-c1+2*c2,(c3+5),-(c3+5)+c4+3); S2(c1-c2,-c1+2*c2,(c3+5),-(c3+5)+c4+4); S2(c1-c2,-c1+2*c2,(c3+5),-(c3+5)+c4+5); S2(c1-c2,-c1+2*c2,(c3+5),-(c3+5)+c4+6); } for (; c4<=512*c2+511; c4=c4+1) { S1(c1-c2,-c1+2*c2,(c3+5),-(c3+5)+c4); S2(c1-c2,-c1+2*c2,(c3+5),-(c3+5)+c4-1); } for (c4=max(512*c2,c3+7); c4<=512*c2+504; c4=c4+8) { S1(c1-c2,-c1+2*c2,(c3+6),-(c3+6)+c4); S1(c1-c2,-c1+2*c2,(c3+6),-(c3+6)+c4+1); S1(c1-c2,-c1+2*c2,(c3+6),-(c3+6)+c4+2); S1(c1-c2,-c1+2*c2,(c3+6),-(c3+6)+c4+3); S1(c1-c2,-c1+2*c2,(c3+6),-(c3+6)+c4+4); S1(c1-c2,-c1+2*c2,(c3+6),-(c3+6)+c4+5); S1(c1-c2,-c1+2*c2,(c3+6),-(c3+6)+c4+6); S1(c1-c2,-c1+2*c2,(c3+6),-(c3+6)+c4+7); S2(c1-c2,-c1+2*c2,(c3+6),-(c3+6)+c4-1); S2(c1-c2,-c1+2*c2,(c3+6),-(c3+6)+c4); S2(c1-c2,-c1+2*c2,(c3+6),-(c3+6)+c4+1); S2(c1-c2,-c1+2*c2,(c3+6),-(c3+6)+c4+2); S2(c1-c2,-c1+2*c2,(c3+6),-(c3+6)+c4+3); S2(c1-c2,-c1+2*c2,(c3+6),-(c3+6)+c4+4); S2(c1-c2,-c1+2*c2,(c3+6),-(c3+6)+c4+5); S2(c1-c2,-c1+2*c2,(c3+6),-(c3+6)+c4+6); } for (; c4<=512*c2+511; c4=c4+1) { S1(c1-c2,-c1+2*c2,(c3+6),-(c3+6)+c4); S2(c1-c2,-c1+2*c2,(c3+6),-(c3+6)+c4-1); } for (c4=max(512*c2,c3+8); c4<=512*c2+504; c4=c4+8) { S1(c1-c2,-c1+2*c2,(c3+7),-(c3+7)+c4); S1(c1-c2,-c1+2*c2,(c3+7),-(c3+7)+c4+1); S1(c1-c2,-c1+2*c2,(c3+7),-(c3+7)+c4+2); S1(c1-c2,-c1+2*c2,(c3+7),-(c3+7)+c4+3); S1(c1-c2,-c1+2*c2,(c3+7),-(c3+7)+c4+4); S1(c1-c2,-c1+2*c2,(c3+7),-(c3+7)+c4+5); S1(c1-c2,-c1+2*c2,(c3+7),-(c3+7)+c4+6); 
S1(c1-c2,-c1+2*c2,(c3+7),-(c3+7)+c4+7); S2(c1-c2,-c1+2*c2,(c3+7),-(c3+7)+c4-1); S2(c1-c2,-c1+2*c2,(c3+7),-(c3+7)+c4); S2(c1-c2,-c1+2*c2,(c3+7),-(c3+7)+c4+1); S2(c1-c2,-c1+2*c2,(c3+7),-(c3+7)+c4+2); S2(c1-c2,-c1+2*c2,(c3+7),-(c3+7)+c4+3); S2(c1-c2,-c1+2*c2,(c3+7),-(c3+7)+c4+4); S2(c1-c2,-c1+2*c2,(c3+7),-(c3+7)+c4+5); S2(c1-c2,-c1+2*c2,(c3+7),-(c3+7)+c4+6); } for (; c4<=512*c2+511; c4=c4+1) { S1(c1-c2,-c1+2*c2,(c3+7),-(c3+7)+c4); S2(c1-c2,-c1+2*c2,(c3+7),-(c3+7)+c4-1); } } for (; c3<=min(min(512*c1-512*c2+511,T),512*c2+510); c3=c3+1) { for (c4=max(512*c2,c3+1); c4<=512*c2+504; c4=c4+8) { S1(c1-c2,-c1+2*c2,c3,-c3+c4); S1(c1-c2,-c1+2*c2,c3,-c3+c4+1); S1(c1-c2,-c1+2*c2,c3,-c3+c4+2); S1(c1-c2,-c1+2*c2,c3,-c3+c4+3); S1(c1-c2,-c1+2*c2,c3,-c3+c4+4); S1(c1-c2,-c1+2*c2,c3,-c3+c4+5); S1(c1-c2,-c1+2*c2,c3,-c3+c4+6); S1(c1-c2,-c1+2*c2,c3,-c3+c4+7); S2(c1-c2,-c1+2*c2,c3,-c3+c4-1); S2(c1-c2,-c1+2*c2,c3,-c3+c4); S2(c1-c2,-c1+2*c2,c3,-c3+c4+1); S2(c1-c2,-c1+2*c2,c3,-c3+c4+2); S2(c1-c2,-c1+2*c2,c3,-c3+c4+3); S2(c1-c2,-c1+2*c2,c3,-c3+c4+4); S2(c1-c2,-c1+2*c2,c3,-c3+c4+5); S2(c1-c2,-c1+2*c2,c3,-c3+c4+6); } for (; c4<=512*c2+511; c4=c4+1) { S1(c1-c2,-c1+2*c2,c3,-c3+c4); S2(c1-c2,-c1+2*c2,c3,-c3+c4-1); } } } /*@ end @*/ } } annot_t_end = rtclock(); annot_t_total += annot_t_end - annot_t_start; } annot_t_total = annot_t_total / REPS; printf("%f\n", annot_t_total); return 1; }
/* ==== file: mixedup_linear_solver.h ==== */
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // #if !defined(KRATOS_MIXEDUP_SOLVER_H_INCLUDED ) #define KRATOS_MIXEDUP_SOLVER_H_INCLUDED // System includes #include <string> #include <iostream> #include <sstream> #include <cstddef> // External includes // Project includes #include "includes/define.h" #include "reorderer.h" #include "solving_strategies/builder_and_solvers/builder_and_solver.h" #include "includes/model_part.h" #include "linear_solvers/iterative_solver.h" #include <boost/numeric/ublas/vector.hpp> #include "utilities/openmp_utils.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** This solver is designed for the solution of mixed U-P problems. * It uses a block structure diving the matrix in UU PP UP PU blocks * and uses "standard" linear solvers for the different blocks as well as a GMRES for the outer part */ template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType = Preconditioner<TSparseSpaceType, TDenseSpaceType>, class TReordererType = Reorderer<TSparseSpaceType, TDenseSpaceType> > class MixedUPLinearSolver : public IterativeSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType> { public: ///@name Type Definitions ///@{ /// Pointer definition of MixedUPLinearSolver KRATOS_CLASS_POINTER_DEFINITION (MixedUPLinearSolver); typedef IterativeSolver<TSparseSpaceType, TDenseSpaceType, TPreconditionerType, TReordererType> BaseType; typedef typename TSparseSpaceType::MatrixType SparseMatrixType; typedef typename TSparseSpaceType::VectorType VectorType; typedef typename TDenseSpaceType::MatrixType DenseMatrixType; typedef typename TDenseSpaceType::VectorType DenseVectorType; typedef std::size_t SizeType; 
///@} ///@name Life Cycle ///@{ /// Default constructor. MixedUPLinearSolver (typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer psolver_UU_block, typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer psolver_PP_block, double NewMaxTolerance, unsigned int NewMaxIterationsNumber, unsigned int m ) : BaseType (NewMaxTolerance, NewMaxIterationsNumber) { //saving the linear solvers to be used in the solution process mpsolver_UU_block = psolver_UU_block; mpsolver_PP_block = psolver_PP_block; mBlocksAreAllocated = false; mis_initialized = false; mm = m; } MixedUPLinearSolver(Parameters settings, typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer psolver_UU_block, typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer psolver_PP_block ): BaseType () { KRATOS_TRY Parameters default_parameters( R"( { "solver_type": "MixedUPLinearSolver", "velocity_solver" : { "solver_type":"BICGSTABSolver" }, "pressure_solver" : { "solver_type":"CGSolver" } "tolerance" : 1.0e-6, "max_iteration" : 200, "gmres_krylov_space_dimension" : 100 } )" ); //now validate agains defaults -- this also ensures no type mismatch settings.ValidateAndAssignDefaults(default_parameters); this->SetTolerance( settings["tolerance"].GetDouble() ); this->SetMaxIterationsNumber( settings["max_iteration"].GetInt() ); mm = settings["gmres_krylov_space_dimension"].GetInt(); //storing other data mpsolver_UU_block = psolver_UU_block; mpsolver_PP_block = psolver_PP_block; mBlocksAreAllocated = false; mis_initialized = false; KRATOS_CATCH("") } /// Copy constructor. MixedUPLinearSolver (const MixedUPLinearSolver& Other) { KRATOS_THROW_ERROR (std::logic_error,"copy constructor not correctly implemented",""); } /// Destructor. ~MixedUPLinearSolver() override {} ///@} ///@name Operators ///@{ /// Assignment operator. 
MixedUPLinearSolver& operator= (const MixedUPLinearSolver& Other) { return *this; } ///@} ///@name Operations ///@{ /** This function is designed to be called as few times as possible. It creates the data structures * that only depend on the connectivity of the matrix (and not on its coefficients) * so that the memory can be allocated once and expensive operations can be done only when strictly * needed @param rA. System matrix @param rX. Solution vector. it's also the initial guess for iterative linear solvers. @param rB. Right hand side vector. */ void Initialize (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override { if (mBlocksAreAllocated == true) { mpsolver_UU_block->Initialize(mK, mu, mru); mpsolver_PP_block->Initialize(mS, mp, mrp); mis_initialized = true; } else { std::cout << "linear solver intialization is deferred to the moment at which blocks are available" << std::endl; } } /** This function is designed to be called every time the coefficients change in the system * that is, normally at the beginning of each solve. * For example if we are implementing a direct solver, this is the place to do the factorization * so that then the backward substitution can be performed effectively more than once @param rA. System matrix @param rX. Solution vector. it's also the initial guess for iterative linear solvers. @param rB. Right hand side vector. 
*/ void InitializeSolutionStep (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override { //copy to local matrices if (mBlocksAreAllocated == false) { FillBlockMatrices (true, rA, mK, mG, mD, mS); mBlocksAreAllocated = true; } else { FillBlockMatrices (false, rA, mK, mG, mD, mS); mBlocksAreAllocated = true; } if(mis_initialized == false) this->Initialize(rA,rX,rB); //initialize solvers mpsolver_UU_block->InitializeSolutionStep(mK, mu, mru); mpsolver_PP_block->InitializeSolutionStep(mS, mp, mrp); } /** This function actually performs the solution work, eventually taking advantage of what was done before in the * Initialize and InitializeSolutionStep functions. @param rA. System matrix @param rX. Solution vector. it's also the initial guess for iterative linear solvers. @param rB. Right hand side vector. */ void PerformSolutionStep (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override { unsigned int m = mm; unsigned int max_iter = BaseType::GetMaxIterationsNumber(); double tol = BaseType::GetTolerance(); gmres_solve (rA,rX,rB,m,max_iter,tol); } /** This function is designed to be called at the end of the solve step. * for example this is the place to remove any data that we do not want to save for later @param rA. System matrix @param rX. Solution vector. it's also the initial guess for iterative linear solvers. @param rB. Right hand side vector. */ void FinalizeSolutionStep (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override { mpsolver_UU_block->FinalizeSolutionStep(mK, mu, mru); mpsolver_PP_block->FinalizeSolutionStep(mS, mp, mrp); } /** This function is designed to clean up all internal data in the solver. * Clear is designed to leave the solver object as if newly created. 
* After a clear a new Initialize is needed */ void Clear() override { mK.clear(); mG.clear(); mD.clear(); mS.clear(); mBlocksAreAllocated = false; mpsolver_UU_block->Clear(); mpsolver_PP_block->Clear(); mu.clear(); mp.clear(); mru.clear(); mrp.clear(); mis_initialized = false; } /** Normal solve method. Solves the linear system Ax=b and puts the result on SystemVector& rX. rVectorx is also th initial guess for iterative methods. @param rA. System matrix @param rX. Solution vector. it's also the initial guess for iterative linear solvers. @param rB. Right hand side vector. */ bool Solve(SparseMatrixType& rA, VectorType& rX, VectorType& rB) override { if (mis_initialized == false) this->Initialize (rA,rX,rB); this->InitializeSolutionStep (rA,rX,rB); this->PerformSolutionStep (rA,rX,rB); this->FinalizeSolutionStep (rA,rX,rB); return false; } /** Multi solve method for solving a set of linear systems with same coefficient matrix. Solves the linear system Ax=b and puts the result on SystemVector& rX. rVectorx is also th initial guess for iterative methods. @param rA. System matrix @param rX. Solution vector. it's also the initial guess for iterative linear solvers. @param rB. Right hand side vector. */ bool Solve (SparseMatrixType& rA, DenseMatrixType& rX, DenseMatrixType& rB) override { return false; } /** Eigenvalue and eigenvector solve method for derived eigensolvers */ void Solve (SparseMatrixType& K, SparseMatrixType& M, DenseVectorType& Eigenvalues, DenseMatrixType& Eigenvectors) override {} /** Some solvers may require a minimum degree of knowledge of the structure of the matrix. To make an example * when solving a mixed u-p problem, it is important to identify the row associated to v and p. * another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers * which require knowledge on the spatial position of the nodes associated to a given dof. 
* This function tells if the solver requires such data */ bool AdditionalPhysicalDataIsNeeded() override { return true; } /** Some solvers may require a minimum degree of knowledge of the structure of the matrix. To make an example * when solving a mixed u-p problem, it is important to identify the row associated to v and p. * another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers * which require knowledge on the spatial position of the nodes associated to a given dof. * This function is the place to eventually provide such data */ void ProvideAdditionalData ( SparseMatrixType& rA, VectorType& rX, VectorType& rB, typename ModelPart::DofsArrayType& rdof_set, ModelPart& r_model_part ) override { //count pressure dofs unsigned int n_pressure_dofs = 0; unsigned int tot_active_dofs = 0; for (ModelPart::DofsArrayType::iterator it = rdof_set.begin(); it!=rdof_set.end(); it++) { if (it->EquationId() < rA.size1()) { tot_active_dofs += 1; if (it->GetVariable().Key() == PRESSURE) n_pressure_dofs += 1; } } if (tot_active_dofs != rA.size1() ) KRATOS_THROW_ERROR (std::logic_error,"total system size does not coincide with the free dof map",""); //resize arrays as needed mpressure_indices.resize (n_pressure_dofs,false); unsigned int other_dof_size = tot_active_dofs - n_pressure_dofs; mother_indices.resize (other_dof_size,false); mglobal_to_local_indexing.resize (tot_active_dofs,false); mis_pressure_block.resize (tot_active_dofs,false); //construct aux_lists as needed //"other_counter[i]" i will contain the position in the global system of the i-th NON-pressure node //"pressure_counter[i]" will contain the in the global system of the i-th NON-pressure node // //mglobal_to_local_indexing[i] will contain the position in the local blocks of the unsigned int pressure_counter = 0; unsigned int other_counter = 0; unsigned int global_pos = 0; for (ModelPart::DofsArrayType::iterator it = rdof_set.begin(); it!=rdof_set.end(); it++) { if 
(it->EquationId() < rA.size1()) { if (it->GetVariable().Key() == PRESSURE) { mpressure_indices[pressure_counter] = global_pos; mglobal_to_local_indexing[global_pos] = pressure_counter; mis_pressure_block[global_pos] = true; pressure_counter++; } else { mother_indices[other_counter] = global_pos; mglobal_to_local_indexing[global_pos] = other_counter; mis_pressure_block[global_pos] = false; other_counter++; } global_pos++; } } } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "Linear solver"; } /// Print information about this object. void PrintInfo (std::ostream& rOStream) const override { rOStream << "Linear solver"; } /// Print object's data. void PrintData (std::ostream& rOStream) const override { } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///this function generates the subblocks of matrix A ///as A = ( K G ) u /// ( D S ) p /// subblocks are allocated or nor depending on the value of "need_allocation" void FillBlockMatrices (bool need_allocation, SparseMatrixType& rA, SparseMatrixType& K, SparseMatrixType& G, SparseMatrixType& D, SparseMatrixType& S ) { KRATOS_TRY //get access to A data const std::size_t* index1 = rA.index1_data().begin(); const std::size_t* index2 = rA.index2_data().begin(); const double* values = rA.value_data().begin(); SparseMatrixType L(mpressure_indices.size(),mpressure_indices.size() ); if (need_allocation == true) { K.clear(); G.clear(); D.clear(); S.clear(); L.clear(); //do allocation K.resize (mother_indices.size() ,mother_indices.size() ); G.resize (mother_indices.size() ,mpressure_indices.size() ); D.resize (mpressure_indices.size(),mother_indices.size() ); S.resize (mpressure_indices.size(),mpressure_indices.size() ); mrp.resize(mpressure_indices.size() ); 
mru.resize(mother_indices.size() ); mp.resize(mpressure_indices.size()); mu.resize(mother_indices.size()); //KRATOS_WATCH (mglobal_to_local_indexing); //allocate the blocks by push_back for (unsigned int i=0; i<rA.size1(); i++) { unsigned int row_begin = index1[i]; unsigned int row_end = index1[i+1]; unsigned int local_row_id = mglobal_to_local_indexing[i]; if ( mis_pressure_block[i] == false) //either K or G { for (unsigned int j=row_begin; j<row_end; j++) { unsigned int col_index = index2[j]; double value = values[j]; unsigned int local_col_id = mglobal_to_local_indexing[col_index]; if (mis_pressure_block[col_index] == false) //K block K.push_back ( local_row_id, local_col_id, value); else //G block G.push_back ( local_row_id, local_col_id, value); } } else //either D or S { for (unsigned int j=row_begin; j<row_end; j++) { unsigned int col_index = index2[j]; double value = values[j]; unsigned int local_col_id = mglobal_to_local_indexing[col_index]; if (mis_pressure_block[col_index] == false) //D block D.push_back ( local_row_id, local_col_id, value); else //S block L.push_back ( local_row_id, local_col_id, value); } } } //allocate the schur complement ConstructSystemMatrix(S,G,D,L); VectorType diagK (mother_indices.size() ); ComputeDiagonalByLumping (K,diagK); //fill the shur complement CalculateShurComplement(S,K,G,D,L,diagK); } else //allocation is not needed so only do copying { for (unsigned int i=0; i<rA.size1(); i++) { unsigned int row_begin = index1[i]; unsigned int row_end = index1[i+1]; unsigned int local_row_id = mglobal_to_local_indexing[i]; if ( mis_pressure_block[i] == false ) //either K or G { for (unsigned int j=row_begin; j<row_end; j++) { unsigned int col_index = index2[j]; double value = values[j]; unsigned int local_col_id = mglobal_to_local_indexing[col_index]; if (mis_pressure_block[col_index] == false) //K block K( local_row_id, local_col_id) = value; else //G block G( local_row_id, local_col_id) = value; } } else //either D or S { for 
(unsigned int j=row_begin; j<row_end; j++) { unsigned int col_index = index2[j]; double value = values[j]; unsigned int local_col_id = mglobal_to_local_indexing[col_index]; if (mis_pressure_block[col_index] == false) //D block D( local_row_id, local_col_id) = value; else //S block L( local_row_id, local_col_id) = value; } } } VectorType diagK (mother_indices.size() ); ComputeDiagonalByLumping (K,diagK); //fill the shur complement CalculateShurComplement(S,K,G,D,L,diagK); } KRATOS_CATCH ("") } ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ /// A counted pointer to the reorderer object. typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer mpsolver_UU_block; typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer mpsolver_PP_block; unsigned int mm; bool mBlocksAreAllocated; bool mis_initialized; DenseVector<unsigned int> mpressure_indices; DenseVector<unsigned int> mother_indices; DenseVector<int> mglobal_to_local_indexing; DenseVector<int> mis_pressure_block; SparseMatrixType mK; SparseMatrixType mG; SparseMatrixType mD; SparseMatrixType mS; VectorType mrp; VectorType mru; VectorType mp; VectorType mu; ///@} ///@name Private Operators ///@{ inline void GeneratePlaneRotation (const double &dx, const double &dy, double &cs, double &sn) { if (dy == 0.0) { cs = 1.0; sn = 0.0; } else if (dx == 0.0) { cs = 0.0; sn = 1.0; } else { const double rnorm = 1.0/sqrt (dx*dx + dy*dy); cs = fabs (dx) * rnorm; sn = cs * dy / dx; } } inline void ApplyPlaneRotation (double &dx, double &dy, const double &cs, const double &sn) { double temp = cs * dx + sn * dy; dy = cs * dy - sn * dx; dx = temp; } void Update (VectorType& y, VectorType& x, int k, Matrix& h, VectorType& s, std::vector< VectorType >& V) { for (unsigned int i=0; 
i<s.size(); i++) y[i] = s[i]; /* for(unsigned int i=s.size(); i<y.size(); i++) y[i] = 0.0;*/ // Backsolve: for (int i = k; i >= 0; --i) { y (i) /= h (i,i); for (int j = i - 1; j >= 0; --j) y (j) -= h (j,i) * y (i); } //create new search dir for (int j = 0; j <= k; ++j) TSparseSpaceType::UnaliasedAdd (x, y[j], V[j]); // x += y(j)* V[j]; } int gmres_solve ( SparseMatrixType& A, VectorType& x, const VectorType& b, unsigned int& m, unsigned int& max_iter, double& tol) { const unsigned int dim = A.size1(); if (m == 0) KRATOS_THROW_ERROR (std::logic_error,"the dimension of the GMRES krylov space can not be set to zero. Please change the value of m","") if (m > max_iter) m = max_iter; VectorType s (m+1), sn (m+1), w (dim), r (dim), y (m+1); VectorType cs (m+1); Matrix H (m+1, m+1); int restart = 0; //preconditioner solve b and store in Minv_b VectorType preconditioned_b (dim); //TSparseSpaceType::Copy(b, preconditioned_b); //preconditioned_b=b //apply preconditioner SolveBlockPreconditioner (b,preconditioned_b); double normb = TSparseSpaceType::TwoNorm (preconditioned_b); /*KRATOS_WATCH(normb);*/ if (normb < 1e-16) //ARBITRARY SMALL NUMBER! { normb = 1e-16; } //r = b - Ax TSparseSpaceType::Mult (A,x,r); TSparseSpaceType::ScaleAndAdd (1.00, b, -1.00, r); //r = b - r //apply preconditioner and overwrite r SolveBlockPreconditioner (r,r); const double rel_tol = tol*normb; double beta = TSparseSpaceType::TwoNorm (r); if (beta <= rel_tol) //finalize! 
{ tol = beta / normb; max_iter = 0; return 0; } unsigned int j; // int err = 0; std::vector< VectorType > V (m+1); for (j = 0; j <= m; ++j) V[j].resize (dim,false); j = 1; while (j <= max_iter) { TSparseSpaceType::Assign (V[0], 1.0/beta, r); //V[0] = r /(T)beta; TSparseSpaceType::SetToZero (s); s[0] = beta; for (unsigned int i = 0; (i < m) && (j <= max_iter); ++i, ++j) { TSparseSpaceType::Mult (A,V[i],w); //w = A*V[i]; //apply preconditioner and overwrite r SolveBlockPreconditioner (w,w); for (unsigned int k = 0; k <= i; k++) { H (k, i) = TSparseSpaceType::Dot (V[k], w); w -= H (k, i) * V[k]; } const double normw = TSparseSpaceType::TwoNorm (w); H (i+1, i) = normw; /*KRATOS_WATCH(normw);*/ // This breakdown is a good one ... if (normw == 0) TSparseSpaceType::Copy (V[i+1], w); //V[i+1] = w; else TSparseSpaceType::Assign (V[i+1], 1.0/normw, w); //V[i+1] = w / normw; for (unsigned int k = 0; k < i; k++) ApplyPlaneRotation (H (k,i), H (k+1,i), cs (k), sn (k) ); GeneratePlaneRotation (H (i,i), H (i+1,i), cs (i), sn (i) ); ApplyPlaneRotation (H (i,i), H (i+1,i), cs (i), sn (i) ); ApplyPlaneRotation (s (i), s (i+1), cs (i), sn (i) ); beta = fabs (s (i+1) ); std::cout << "iter = " << j << " estimated res ratio = " << beta << std::endl; // KRATOS_WATCH (beta); if (beta <= rel_tol) { this->Update (y, x, i, H, s, V); return 0; } } this->Update (y,x, m - 1, H, s, V); //r = b - Ax TSparseSpaceType::Mult (A,x,r); TSparseSpaceType::ScaleAndAdd (1.00, b, -1.00, r); //r = b - r beta = TSparseSpaceType::TwoNorm (r); std::cout << "number of iterations at convergence = " << j << std::endl; if (beta < rel_tol) { return 0; } ++restart; } // err = 1; return 1; } //this function extracts from a vector which has the size of the //overall r, the part that corresponds to u-dofs void GetUPart (const VectorType& rtot, VectorType& ru) { if (ru.size() != mother_indices.size() ) ru.resize (mother_indices.size(), false); #pragma omp parallel for for (int i = 0; i<static_cast<int>(ru.size()); i++) 
ru[i] = rtot[mother_indices[i]]; } //this function extracts from a vector which has the size of the //overall r, the part that corresponds to p-dofs void GetPPart (const VectorType& rtot, VectorType& rp) { if (rp.size() != mpressure_indices.size() ) rp.resize (mpressure_indices.size(), false); #pragma omp parallel for for (int i = 0; i<static_cast<int>(rp.size()); i++) rp[i] = rtot[mpressure_indices[i]]; } void WriteUPart (VectorType& rtot, const VectorType& ru) { #pragma omp parallel for for (int i = 0; i< static_cast<int>(ru.size()); i++) rtot[mother_indices[i]] = ru[i]; } void WritePPart (VectorType& rtot, const VectorType& rp) { #pragma omp parallel for for (int i = 0; i< static_cast<int>(rp.size()); i++) rtot[mpressure_indices[i]] = rp[i]; } void ComputeDiagonalByLumping (SparseMatrixType& A,VectorType& diagA) { if (diagA.size() != A.size1() ) diagA.resize (A.size1() ); //get access to A data const std::size_t* index1 = A.index1_data().begin(); // const std::size_t* index2 = A.index2_data().begin(); const double* values = A.value_data().begin(); #pragma omp parallel for for (int i=0; i< static_cast<int>(A.size1()); i++) { unsigned int row_begin = index1[i]; unsigned int row_end = index1[i+1]; double temp = 0.0; for (unsigned int j=row_begin; j<row_end; j++) temp += values[j]*values[j]; diagA[i] = sqrt(temp); } } double CheckMatrix (SparseMatrixType& A) { //get access to A data const std::size_t* index1 = A.index1_data().begin(); const std::size_t* index2 = A.index2_data().begin(); const double* values = A.value_data().begin(); double norm = 0.0; for (unsigned int i=0; i<A.size1(); i++) { unsigned int row_begin = index1[i]; unsigned int row_end = index1[i+1]; if (row_end - row_begin == 0) std::cout << "line " << i << " has no elements" << std::endl; //KRATOS_THROW_ERROR(std::logic_error, "line found with no entries on line ",i) for (unsigned int j=row_begin; j<row_end; j++) { if (index2[j]>A.size2() ) KRATOS_THROW_ERROR (std::logic_error, "array above size of 
A","") norm += values[j]*values[j]; } } return sqrt (norm); } void SolveBlockPreconditioner (const VectorType& rtot, VectorType& x) { boost::numeric::ublas::noalias(mp) = boost::numeric::ublas::zero_vector<double>(mother_indices.size()); boost::numeric::ublas::noalias(mu) = boost::numeric::ublas::zero_vector<double>(mother_indices.size()); VectorType uaux (mother_indices.size() ); VectorType paux (mpressure_indices.size() ); //get diagonal of K (to be removed) VectorType diagK (mother_indices.size() ); ComputeDiagonalByLumping (mK,diagK); //get the u and p residuals GetUPart (rtot,mru); GetPPart (rtot,mrp); //solve u block mpsolver_UU_block->Solve (mK,mu,mru); //correct pressure block //rp -= D*u TSparseSpaceType::Mult (mD,mu,paux); TSparseSpaceType::UnaliasedAdd (mrp,-1.0,paux); //solve pressure //p = S⁻1*rp mpsolver_PP_block->Solve (mS,mp,mrp); //correct u block //u = G*p TSparseSpaceType::Mult (mG,mp,uaux); #pragma omp parallel for for (int i=0; i< static_cast<int>(mu.size()); i++) mu[i] += uaux[i]/diagK[i]; //write back solution WriteUPart (x,mu); WritePPart (x,mp); } /// Compute the Pressure System Matrix /** * Compute the System Matrix A = L - D*Inv(Diag(S))*G. The multiplication * is performed in random order, so each row will be stored in a temporary * variable, ordered and copied in input matrix A. 
*/ void CalculateShurComplement ( SparseMatrixType& A, SparseMatrixType& K, SparseMatrixType& rG, SparseMatrixType& rD, SparseMatrixType& rL, VectorType& diagK ) { // Retrieve matrices // Compute Inv(Diag(S)) VectorType& rIDiagS = diagK; //KRATOS_WATCH(804) typedef DenseVector<int> IndexVector; //typedef typename SparseMatrixType::iterator1 OuterIt; //typedef typename SparseMatrixType::iterator2 InnerIt; typedef typename boost::numeric::ublas::matrix_row< SparseMatrixType > RowType; int DiagSize = int (diagK.size()); // to avoid comparison between int & unsigned int #pragma omp parallel for for ( int i = 0; i < DiagSize; i++) rIDiagS[i] = 1.0/diagK[i]; OpenMPUtils::PartitionVector Partition; int NumThreads = OpenMPUtils::GetNumThreads(); OpenMPUtils::DivideInPartitions (A.size1(),NumThreads,Partition); #pragma omp parallel { int k = OpenMPUtils::ThisThread(); VectorType CurrentRow(K.size2()); for (unsigned int i = 0; i < rL.size1(); i++) CurrentRow[i] = 0.0; IndexVector Next = IndexVector(rL.size1()); //IndexVector& Next = *pNext; // Keeps track of which columns were filled for (unsigned int m=0; m < rL.size1(); m++) Next[m] = -1; std::size_t NumTerms = 0; // Full positions in a row std::vector<unsigned int> UsedCols = std::vector<unsigned int>(); // std::vector<unsigned int>& UsedCols = *pUsedCols; UsedCols.reserve (rL.size1()); for ( int RowIndex = Partition[k] ; RowIndex != Partition[k+1] ; RowIndex++ ) { RowType RowD (rD,RowIndex); RowType RowL (rL,RowIndex); int head = -2; std::size_t Length = 0; // Write L in A for ( typename RowType::iterator ItL = RowL.begin(); ItL != RowL.end(); ItL++ ) { CurrentRow (ItL.index() ) = *ItL; if ( Next[ItL.index()] == -1) { Next[ItL.index()] = head; head = ItL.index(); Length++; } } // Substract D*Inv(Diag(S))*G for ( typename RowType::iterator ItD = RowD.begin(); ItD != RowD.end(); ItD++ ) { RowType RowG (rG,ItD.index() ); for ( typename RowType::iterator ItG = RowG.begin(); ItG != RowG.end(); ItG++ ) { 
CurrentRow[ItG.index()] -= (*ItD) * rIDiagS[ItD.index()] * (*ItG); if ( Next[ItG.index()] == -1) { Next[ItG.index()] = head; head = ItG.index(); Length++; } } } // Identify full terms for ordering for ( std::size_t i = 0; i < Length; i++) { if ( Next[head] != -1 ) { UsedCols.push_back (head); NumTerms++; } int temp = head; head = Next[head]; // Clear 'Next' for next iteration Next[temp] = -1; } // Sort Column indices SortCols (UsedCols,NumTerms); // Fill matrix row, then clean temporary variables. RowType RowA (A,RowIndex); std::size_t n = 0; unsigned int Col; for ( typename RowType::iterator ItA = RowA.begin(); ItA != RowA.end(); ItA++) { Col = UsedCols[n++]; *ItA = CurrentRow[Col]; CurrentRow[Col] = 0; } NumTerms = 0; UsedCols.resize (0,false); } } //KRATOS_WATCH(896) //add stabilization matrix L /* const std::size_t* L_index1 = rL.index1_data().begin(); const std::size_t* L_index2 = rL.index2_data().begin(); const double* L_values = rL.value_data().begin(); for (unsigned int i=0; i<rL.size1(); i++) { unsigned int row_begin = L_index1[i]; unsigned int row_end = L_index1[i+1]; diagA[i] = 0.0; for (unsigned int j=row_begin; j<row_end; j++) { unsigned int col = L_index2[j]; rS(i,col) += L_values[j]; } }*/ } /// Helper function for Sytem matrix functions void SortCols ( std::vector<unsigned int>& ColList, std::size_t& NumCols) { bool swap = true; unsigned int d = NumCols; int temp; while ( swap || d > 1 ) { swap = false; d = (d+1) /2; for ( unsigned int i=0; i< (NumCols - d); i++) if ( ColList[i+d] < ColList[i] ) { temp = ColList[i+d]; ColList[i+d] = ColList[i]; ColList[i] = temp; swap = true; } } } /// Identify non-zero tems in the system matrix void ConstructSystemMatrix( SparseMatrixType& A, SparseMatrixType& rG, SparseMatrixType& rD, SparseMatrixType& rL ) { typedef DenseVector<int> IndexVector; typedef OpenMPUtils::PartitionVector PartitionVector; //typedef typename SparseMatrixType::iterator1 OuterIt; //typedef typename SparseMatrixType::iterator2 InnerIt; 
typedef typename boost::numeric::ublas::matrix_row< SparseMatrixType > RowType; PartitionVector Partition; int NumThreads = OpenMPUtils::GetNumThreads(); OpenMPUtils::DivideInPartitions(A.size1(),NumThreads,Partition); for ( int k = 0 ; k < NumThreads ; k++) { // This code is serial, the pragma is here to ensure that each // row block is assigned to the processor that will fill it #pragma omp parallel if ( OpenMPUtils::ThisThread() == k) { // Kratos::shared_ptr< IndexVector > pNext( new IndexVector(rL.size1() ) ); // IndexVector& Next = *pNext; // Keeps track of which columns were filled IndexVector Next(rL.size1()); for (unsigned int m = 0; m < rL.size1(); m++) Next[m] = -1; std::size_t NumTerms = 0; // Full positions in a row std::vector<unsigned int> UsedCols; // std::vector<unsigned int>& UsedCols = *pUsedCols; UsedCols.reserve(rL.size1()); for ( int RowIndex = Partition[k] ; RowIndex != Partition[k+1] ; RowIndex++ ) { RowType RowD(rD,RowIndex); RowType RowL(rL,RowIndex); int head = -2; std::size_t Length = 0; // Terms filled by L for ( typename RowType::iterator ItL = RowL.begin(); ItL != RowL.end(); ItL++ ) { if ( Next[ItL.index()] == -1) { Next[ItL.index()] = head; head = ItL.index(); Length++; } } // Additional terms due to D*Inv(Diag(S))*G for ( typename RowType::iterator ItD = RowD.begin(); ItD != RowD.end(); ItD++ ) { RowType RowG(rG,ItD.index()); for ( typename RowType::iterator ItG = RowG.begin(); ItG != RowG.end(); ItG++ ) { if ( Next[ItG.index()] == -1) { Next[ItG.index()] = head; head = ItG.index(); Length++; } } } // Identify full terms for ordering for ( std::size_t i = 0; i < Length; i++) { if ( Next[head] != -1 ) { UsedCols.push_back(head); NumTerms++; } int temp = head; head = Next[head]; // Clear 'Next' for next iteration Next[temp] = -1; } // Sort Column indices SortCols(UsedCols,NumTerms); // Store row in matrix, clean temporary variables for ( unsigned int i = 0; i < NumTerms; i++) { A.push_back(RowIndex,UsedCols[i],0); } NumTerms = 0; 
UsedCols.resize(0,false); } } } } ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; // Class MixedUPLinearSolver ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ /// input stream function template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType> inline std::istream& operator >> (std::istream& IStream, MixedUPLinearSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>& rThis) { return IStream; } /// output stream function template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType> inline std::ostream& operator << (std::ostream& rOStream, const MixedUPLinearSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>& rThis) { rThis.PrintInfo (rOStream); rOStream << std::endl; rThis.PrintData (rOStream); return rOStream; } ///@} } // namespace Kratos. #endif // KRATOS_MIXEDUP_SOLVER_H_INCLUDED defined
perturbations.c
/** @file perturbations.c Documented perturbation module
 *
 * Julien Lesgourgues, 23.09.2010
 *
 * Deals with the perturbation evolution.
 * This module has two purposes:
 *
 * - at the beginning; to initialize the perturbations, i.e. to
 * integrate the perturbation equations, and store temporarily the terms
 * contributing to the source functions as a function of conformal
 * time. Then, to perform a few manipulations of these terms in order to
 * infer the actual source functions \f$ S^{X} (k, \tau) \f$, and to
 * store them as a function of conformal time inside an interpolation
 * table.
 *
 * - at any time in the code; to evaluate the source functions at a
 * given conformal time (by interpolating within the interpolation
 * table).
 *
 * Hence the following functions can be called from other modules:
 *
 * -# perturb_init() at the beginning (but after background_init() and thermodynamics_init())
 * -# perturb_sources_at_tau() at any later time
 * -# perturb_free() at the end, when no more calls to perturb_sources_at_tau() are needed
 */

#include "perturbations.h"

/**
 * Source function \f$ S^{X} (k, \tau) \f$ at a given conformal time tau.
 *
 * Evaluate source functions at given conformal time tau by reading
 * the pre-computed table and interpolating.
 *
 * @param ppt        Input: pointer to perturbation structure containing interpolation tables
 * @param index_md   Input: index of requested mode
 * @param index_ic   Input: index of requested initial condition
 * @param index_type Input: index of requested source function type
 * @param tau        Input: any value of conformal time
 * @param psource    Output: vector (already allocated, of size ppt->k_size[index_md]) of source function as a function of k
 * @return the error status (_SUCCESS_ or _FAILURE_, with ppt->error_message set on failure)
 */

int perturb_sources_at_tau(
                           struct perturbs * ppt,
                           int index_md,
                           int index_ic,
                           int index_type,
                           double tau,
                           double * psource
                           ) {

  /** Summary: */

  /** - interpolate in pre-computed table contained in ppt
      (the (ic,type) pair is flattened into a single index of the
      sources table; interpolation is along the tau dimension, the
      whole row of k values is returned at once) */

  class_call(array_interpolate_two_bis(ppt->tau_sampling,
                                       1,
                                       0,
                                       ppt->sources[index_md][index_ic*ppt->tp_size[index_md]+index_type],
                                       ppt->k_size[index_md],
                                       ppt->tau_size,
                                       tau,
                                       psource,
                                       ppt->k_size[index_md],
                                       ppt->error_message),
             ppt->error_message,
             ppt->error_message);

  return _SUCCESS_;

}

/**
 * Initialize the perturbs structure, and in particular the table of source functions.
 *
 * Main steps:
 *
 * - given the values of the flags describing which kind of
 *   perturbations should be considered (modes: scalar/vector/tensor,
 *   initial conditions, type of source functions needed...),
 *   initialize indices and wavenumber list
 *
 * - define the time sampling for the output source functions
 *
 * - for each mode (scalar/vector/tensor): initialize the indices of
 *   relevant perturbations, integrate the differential system,
 *   compute and store the source functions.
*
 * @param ppr Input: pointer to precision structure
 * @param pba Input: pointer to background structure
 * @param pth Input: pointer to thermodynamics structure
 * @param ppt Output: Initialized perturbation structure
 * @return the error status
 */

int perturb_init(
                 struct precision * ppr,
                 struct background * pba,
                 struct thermo * pth,
                 struct perturbs * ppt
                 ) {

  /** Summary: */

  /** - define local variables */

  /* running index for modes */
  int index_md;
  /* running index for initial conditions */
  int index_ic;
  /* running index for wavenumbers */
  int index_k;
  /* pointer to one struct perturb_workspace per thread (one if no openmp) */
  struct perturb_workspace ** pppw;
  /* number of threads (always one if no openmp) */
  int number_of_threads=1;
  /* index of the thread (always 0 if no openmp) */
  int thread=0;

  /* This code can be optionally compiled with the openmp option for
     parallel computation. Inside parallel regions, the use of the
     command "return" is forbidden. For error management, instead of
     "return _FAILURE_", we will set the variable below to "abort =
     _TRUE_". This will lead to a "return _FAILURE_" just after
     leaving the parallel region. */
  int abort;

  /* unsigned integer that will be set to the size of the workspace */
  size_t sz;

#ifdef _OPENMP
  /* instrumentation times */
  double tstart, tstop, tspent;
#endif

  /** - perform preliminary checks */

  if (ppt->has_perturbations == _FALSE_) {
    if (ppt->perturbations_verbose > 0)
      printf("No sources requested. Perturbation module skipped.\n");
    return _SUCCESS_;
  }
  else {
    if (ppt->perturbations_verbose > 0)
      printf("Computing sources\n");
  }

  /* sanity checks on the combination of gauge/species/approximation
     choices, before any allocation happens */

  class_test((ppt->gauge == synchronous) && (pba->has_cdm == _FALSE_),
             ppt->error_message,
             "In the synchronous gauge, it is not self-consistent to assume no CDM: the later is used to define the initial timelike hypersurface. You can either add a negligible amount of CDM or switch to newtonian gauge");

  class_test ((ppr->tight_coupling_approximation < first_order_MB) ||
              (ppr->tight_coupling_approximation > compromise_CLASS),
              ppt->error_message,
              "your tight_coupling_approximation is set to %d, out of range defined in perturbations.h",ppr->tight_coupling_approximation);

  class_test ((ppr->radiation_streaming_approximation < rsa_null) ||
              (ppr->radiation_streaming_approximation > rsa_none),
              ppt->error_message,
              "your radiation_streaming_approximation is set to %d, out of range defined in perturbations.h",ppr->radiation_streaming_approximation);

  if (pba->has_ur == _TRUE_) {

    class_test ((ppr->ur_fluid_approximation < ufa_mb) ||
                (ppr->ur_fluid_approximation > ufa_none),
                ppt->error_message,
                "your ur_fluid_approximation is set to %d, out of range defined in perturbations.h",ppr->ur_fluid_approximation);
  }

  if (pba->has_ncdm == _TRUE_) {

    class_test ((ppr->ncdm_fluid_approximation < ncdmfa_mb) ||
                (ppr->ncdm_fluid_approximation > ncdmfa_none),
                ppt->error_message,
                "your ncdm_fluid_approximation is set to %d, out of range defined in perturbations.h",ppr->ncdm_fluid_approximation);
  }

  if (pba->has_fld == _TRUE_) {

    class_test(pba->w0_fld+pba->wa_fld >= 0.,
               ppt->error_message,
               "So far, the fluid is meant to be negligible at early time, and not to be important for defining the initial conditions of other species. You are using parameters for which this assumption may break down, so maybe it's the case to fully implement the fluid in the initial condition routine");

    class_test((pba->w0_fld==-1.) && (pba->wa_fld==0.),
               ppt->error_message,
               "Your choice of a fluid with (w0,wa)=(-1,0) is not valid due to instabilities in the unphysical perturbations of such a fluid. Try instead with a plain cosmological constant");

    class_test(((pba->w0_fld + pba->wa_fld +1.0)*(pba->w0_fld+1.0)) < 0.0,
               ppt->error_message,
               "w crosses -1 between the infinite past and today, and this would lead to divergent perturbation equations for the fluid.");

  }

  if (pba->has_dcdm == _TRUE_) {

    class_test((ppt->has_cdi == _TRUE_) || (ppt->has_bi == _TRUE_) || (ppt->has_nid == _TRUE_) || (ppt->has_niv == _TRUE_),
               ppt->error_message,
               "Non-adiabatic initial conditions not coded in presence of decaying dark matter");

  }

  class_test(ppt->has_vectors == _TRUE_,
             ppt->error_message,
             "Vectors not coded yet");

  if ((ppt->has_niv == _TRUE_) && (ppt->perturbations_verbose > 0)) {
    printf("Warning: the niv initial conditions in CLASS (and also in CAMB) should still be double-checked: if you want to do it and send feedback, you are welcome!\n");
  }

  /* decide which extra species must be evolved for tensors, depending
     on the requested tensor method */
  if (ppt->has_tensors == _TRUE_) {

    ppt->evolve_tensor_ur = _FALSE_;
    ppt->evolve_tensor_ncdm = _FALSE_;

    switch (ppt->tensor_method) {

    case (tm_photons_only):
      break;

    case (tm_massless_approximation):
      if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_))
        ppt->evolve_tensor_ur = _TRUE_;
      break;

    case (tm_exact):
      if (pba->has_ur == _TRUE_)
        ppt->evolve_tensor_ur = _TRUE_;
      if (pba->has_ncdm == _TRUE_)
        ppt->evolve_tensor_ncdm = _TRUE_;
      break;
    }
  }

  /** - initialize all indices and lists in perturbs structure using perturb_indices_of_perturbs() */

  class_call(perturb_indices_of_perturbs(ppr,
                                         pba,
                                         pth,
                                         ppt),
             ppt->error_message,
             ppt->error_message);

  if (ppt->z_max_pk > pth->z_rec) {

    class_test(ppt->has_cmb == _TRUE_,
               ppt->error_message,
               "You requested a very high z_pk=%e, higher than z_rec=%e. This works very well when you don't ask for a calculation of the CMB source function(s). Remove any CMB from your output and try e.g. with 'output=mTk' or 'output=mTk,vTk'",
               ppt->z_max_pk,
               pth->z_rec);

    class_test(ppt->has_source_delta_m == _TRUE_,
               ppt->error_message,
               "You requested a very high z_pk=%e, higher than z_rec=%e. This works very well when you ask only transfer functions, e.g. with 'output=mTk' or 'output=mTk,vTk'. But if you need the total matter (e.g. with 'mPk', 'dCl', etc.) there is an issue with the calculation of delta_m at very early times. By default, delta_m is a gauge-invariant variable (the density fluctuation in comoving gauge) and this quantity is hard to get accurately at very early times. The solution is to define delta_m as the density fluctuation in the current gauge, synchronous or newtonian. For the moment this must be done manually by commenting the line 'ppw->delta_m += 3. *ppw->pvecback[pba->index_bg_a]*ppw->pvecback[pba->index_bg_H] * ppw->theta_m/k2;' in perturb_sources(). In the future there will be an option for doing it in an easier way.",
               ppt->z_max_pk,
               pth->z_rec);

  }

  /** - define the common time sampling for all sources using perturb_timesampling_for_sources() */

  class_call(perturb_timesampling_for_sources(ppr,
                                              pba,
                                              pth,
                                              ppt),
             ppt->error_message,
             ppt->error_message);

  /** - if we want to store perturbations, write titles and allocate storage */
  class_call(perturb_prepare_output(pba,ppt),
             ppt->error_message,
             ppt->error_message);

  /** - create an array of workspaces in multi-thread case */

#ifdef _OPENMP
  /* trivial parallel region used only to query the actual number of
     threads that subsequent regions will get */
#pragma omp parallel
  {
    number_of_threads = omp_get_num_threads();
  }
#endif

  class_alloc(pppw,number_of_threads * sizeof(struct perturb_workspace *),ppt->error_message);

  /** - loop over modes (scalar, tensors, etc). For each mode: */

  for (index_md = 0; index_md < ppt->md_size; index_md++) {

    if (ppt->perturbations_verbose > 1)
      printf("Evolving mode %d/%d\n",index_md+1,ppt->md_size);

    abort = _FALSE_;

    sz = sizeof(struct perturb_workspace);

#pragma omp parallel                                            \
  shared(pppw,ppr,pba,pth,ppt,index_md,abort,number_of_threads) \
  private(thread)                                               \
  num_threads(number_of_threads)

    {

#ifdef _OPENMP
      thread=omp_get_thread_num();
#endif

      /** - --> (a) create a workspace (one per thread in multi-thread case) */

      class_alloc_parallel(pppw[thread],sz,ppt->error_message);

      /** - --> (b) initialize indices of vectors of perturbations with perturb_indices_of_current_vectors() */

      class_call_parallel(perturb_workspace_init(ppr,
                                                 pba,
                                                 pth,
                                                 ppt,
                                                 index_md,
                                                 pppw[thread]),
                          ppt->error_message,
                          ppt->error_message);

    } /* end of parallel region */

    if (abort == _TRUE_) return _FAILURE_;

    /** - --> (c) loop over initial conditions and wavenumbers; for each of them, evolve perturbations and compute source functions with perturb_solve() */

    for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) {

      if (ppt->perturbations_verbose > 1)
        printf("Evolving ic %d/%d\n",index_ic+1,ppt->ic_size[index_md]);

      if (ppt->perturbations_verbose > 1)
        printf("evolving %d wavenumbers\n",ppt->k_size[index_md]);

      abort = _FALSE_;

#pragma omp parallel                                                     \
  shared(pppw,ppr,pba,pth,ppt,index_md,index_ic,abort,number_of_threads) \
  private(index_k,thread,tstart,tstop,tspent)                            \
  num_threads(number_of_threads)

      {

#ifdef _OPENMP
        thread=omp_get_thread_num();
        tspent=0.;
#endif

#pragma omp for schedule (dynamic)

        /* integrating backwards is slightly more optimal for parallel runs */
        //for (index_k = 0; index_k < ppt->k_size; index_k++) {
        for (index_k = ppt->k_size[index_md]-1; index_k >=0; index_k--) {

          if ((ppt->perturbations_verbose > 2) && (abort == _FALSE_)) {
            printf("evolving mode k=%e /Mpc (%d/%d)",ppt->k[index_md][index_k],index_k+1,ppt->k_size[index_md]);
            if (pba->sgnK != 0)
              printf(" (for scalar modes, corresponds to nu=%e)",sqrt(ppt->k[index_md][index_k]*ppt->k[index_md][index_k]+pba->K)/sqrt(pba->sgnK*pba->K));
            printf("\n");
          }

#ifdef _OPENMP
          tstart = omp_get_wtime();
#endif

          class_call_parallel(perturb_solve(ppr,
                                            pba,
                                            pth,
                                            ppt,
                                            index_md,
                                            index_ic,
                                            index_k,
                                            pppw[thread]),
                              ppt->error_message,
                              ppt->error_message);

#ifdef _OPENMP
          tstop = omp_get_wtime();

          tspent += tstop-tstart;
#endif

          /* make any failure flagged by another thread visible so the
             remaining iterations can bail out early */
#pragma omp flush(abort)

        } /* end of loop over wavenumbers */

#ifdef _OPENMP
        if (ppt->perturbations_verbose>1)
          printf("In %s: time spent in parallel region (loop over k's) = %e s for thread %d\n",
                 __func__,tspent,omp_get_thread_num());
#endif

      } /* end of parallel region */

      if (abort == _TRUE_) return _FAILURE_;

    } /* end of loop over initial conditions */

    abort = _FALSE_;

#pragma omp parallel                                \
  shared(pppw,ppt,index_md,abort,number_of_threads) \
  private(thread)                                   \
  num_threads(number_of_threads)

    {

#ifdef _OPENMP
      thread=omp_get_thread_num();
#endif

      class_call_parallel(perturb_workspace_free(ppt,index_md,pppw[thread]),
                          ppt->error_message,
                          ppt->error_message);

    } /* end of parallel region */

    if (abort == _TRUE_) return _FAILURE_;

  } /* end loop over modes */

  free(pppw);

  return _SUCCESS_;

}

/**
 * Free all memory space allocated by perturb_init().
 *
 * To be called at the end of each run, only when no further calls to
 * perturb_sources_at_tau() are needed.
* * @param ppt Input: perturbation structure to be freed * @return the error status */ int perturb_free( struct perturbs * ppt ) { int index_md,index_ic,index_type; int filenum; if (ppt->has_perturbations == _TRUE_) { for (index_md = 0; index_md < ppt->md_size; index_md++) { for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) { for (index_type = 0; index_type < ppt->tp_size[index_md]; index_type++) { free(ppt->sources[index_md][index_ic*ppt->tp_size[index_md]+index_type]); } } free(ppt->sources[index_md]); free(ppt->k[index_md]); } free(ppt->tau_sampling); free(ppt->tp_size); free(ppt->ic_size); free(ppt->k); free(ppt->k_size_cmb); free(ppt->k_size_cl); free(ppt->k_size); free(ppt->sources); /** Stuff related to perturbations output: */ /** - Free non-NULL pointers */ if (ppt->index_k_output_values != NULL) free(ppt->index_k_output_values); for (filenum = 0; filenum<_MAX_NUMBER_OF_K_FILES_; filenum++){ if (ppt->scalar_perturbations_data[filenum] != NULL) free(ppt->scalar_perturbations_data[filenum]); if (ppt->vector_perturbations_data[filenum] != NULL) free(ppt->vector_perturbations_data[filenum]); if (ppt->tensor_perturbations_data[filenum] != NULL) free(ppt->tensor_perturbations_data[filenum]); } } return _SUCCESS_; } /** * Initialize all indices and allocate most arrays in perturbs structure. 
*
 * @param ppr Input: pointer to precision structure
 * @param pba Input: pointer to background structure
 * @param pth Input: pointer to thermodynamics structure
 * @param ppt Input/Output: Initialized perturbation structure
 * @return the error status
 */

int perturb_indices_of_perturbs(
                                struct precision * ppr,
                                struct background * pba,
                                struct thermo * pth,
                                struct perturbs * ppt
                                ) {

  /** Summary: */

  /** - define local variables */

  int index_type;
  int index_md;
  int index_ic;
  int index_type_common;

  /** - count modes (scalar, vector, tensor) and assign corresponding indices */

  /* NOTE: the order of the class_define_index() calls below fixes the
     numerical value of each index -- do not reorder them */

  index_md = 0;
  class_define_index(ppt->index_md_scalars,ppt->has_scalars,index_md,1);
  class_define_index(ppt->index_md_vectors,ppt->has_vectors,index_md,1);
  class_define_index(ppt->index_md_tensors,ppt->has_tensors,index_md,1);
  ppt->md_size = index_md;

  class_test(index_md == 0,
             ppt->error_message,
             "you should have at least one out of {scalars, vectors, tensors} !!!");

  /** - allocate array of number of types for each mode, ppt->tp_size[index_md] */

  class_alloc(ppt->tp_size,ppt->md_size*sizeof(int),ppt->error_message);

  /** - allocate array of number of initial conditions for each mode, ppt->ic_size[index_md] */

  class_alloc(ppt->ic_size,ppt->md_size*sizeof(int),ppt->error_message);

  /** - allocate array of arrays of source functions for each mode, ppt->source[index_md] */

  class_alloc(ppt->sources,ppt->md_size * sizeof(double *),ppt->error_message);

  /** - initialization of all flags to false (will eventually be set to true later) */

  ppt->has_cmb = _FALSE_;
  ppt->has_lss = _FALSE_;

  ppt->has_source_t = _FALSE_;
  ppt->has_source_p = _FALSE_;
  ppt->has_source_delta_m = _FALSE_;
  ppt->has_source_delta_g = _FALSE_;
  ppt->has_source_delta_b = _FALSE_;
  ppt->has_source_delta_cdm = _FALSE_;
  ppt->has_source_delta_dcdm = _FALSE_;
  ppt->has_source_delta_fld = _FALSE_;
  ppt->has_source_delta_scf = _FALSE_;
  ppt->has_source_delta_dr = _FALSE_;
  ppt->has_source_delta_ur = _FALSE_;
  ppt->has_source_delta_ncdm = _FALSE_;
  ppt->has_source_theta_m = _FALSE_;
  ppt->has_source_theta_g = _FALSE_;
  ppt->has_source_theta_b = _FALSE_;
  ppt->has_source_theta_cdm = _FALSE_;
  ppt->has_source_theta_dcdm = _FALSE_;
  ppt->has_source_theta_fld = _FALSE_;
  ppt->has_source_theta_scf = _FALSE_;
  ppt->has_source_theta_dr = _FALSE_;
  ppt->has_source_theta_ur = _FALSE_;
  ppt->has_source_theta_ncdm = _FALSE_;
  ppt->has_source_phi = _FALSE_;
  ppt->has_source_phi_prime = _FALSE_;
  ppt->has_source_phi_plus_psi = _FALSE_;
  ppt->has_source_psi = _FALSE_;

  /** - source flags and indices, for sources that all modes have in
      common (temperature, polarization, ...). For temperature, the
      term t2 is always non-zero, while other terms are non-zero only
      for scalars and vectors. For polarization, the term e is always
      non-zero, while the term b is only for vectors and tensors. */

  if (ppt->has_cl_cmb_temperature == _TRUE_) {
    ppt->has_source_t = _TRUE_;
    ppt->has_cmb = _TRUE_;
  }

  if (ppt->has_cl_cmb_polarization == _TRUE_) {
    ppt->has_source_p = _TRUE_;
    ppt->has_cmb = _TRUE_;
  }

  index_type = 0;
  class_define_index(ppt->index_tp_t2,ppt->has_source_t,index_type,1);
  class_define_index(ppt->index_tp_p,ppt->has_source_p,index_type,1);
  /* remember where mode-specific types will start counting from */
  index_type_common = index_type;

  /* indices for perturbed recombination */

  class_define_index(ppt->index_tp_perturbed_recombination_delta_temp,ppt->has_perturbed_recombination,index_type,1);
  class_define_index(ppt->index_tp_perturbed_recombination_delta_chi,ppt->has_perturbed_recombination,index_type,1);

  /** - define k values with perturb_get_k_list() */

  class_call(perturb_get_k_list(ppr,
                                pba,
                                pth,
                                ppt),
             ppt->error_message,
             ppt->error_message);

  /** - loop over modes. Initialize flags and indices which are specific to each mode. */

  for (index_md = 0; index_md < ppt->md_size; index_md++) {

    /** - (a) scalars */

    if (_scalars_) {

      /** - --> source flags and indices, for sources that are specific to scalars */

      if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) || (ppt->has_cl_lensing_potential)) {
        ppt->has_lss = _TRUE_;
        ppt->has_source_phi_plus_psi = _TRUE_;
      }

      if ((ppt->has_pk_matter == _TRUE_) || (ppt->has_nl_corrections_based_on_delta_m)) {
        ppt->has_lss = _TRUE_;
        ppt->has_source_delta_m = _TRUE_;
      }

      if (ppt->has_density_transfers == _TRUE_) {
        ppt->has_lss = _TRUE_;
        ppt->has_source_delta_g = _TRUE_;
        ppt->has_source_delta_b = _TRUE_;
        if (pba->has_cdm == _TRUE_)
          ppt->has_source_delta_cdm = _TRUE_;
        if (pba->has_dcdm == _TRUE_)
          ppt->has_source_delta_dcdm = _TRUE_;
        if (pba->has_fld == _TRUE_)
          ppt->has_source_delta_fld = _TRUE_;
        if (pba->has_scf == _TRUE_)
          ppt->has_source_delta_scf = _TRUE_;
        if (pba->has_ur == _TRUE_)
          ppt->has_source_delta_ur = _TRUE_;
        if (pba->has_dr == _TRUE_)
          ppt->has_source_delta_dr = _TRUE_;
        if (pba->has_ncdm == _TRUE_)
          ppt->has_source_delta_ncdm = _TRUE_;
        // Thanks to the following lines, (phi,psi) are also stored as sources
        // (Obtained directly in newtonian gauge, infereed from (h,eta) in synchronous gauge).
        // If density transfer functions are requested in the (default) CLASS format,
        // (phi, psi) will be appended to the delta_i's in the final output.
        ppt->has_source_phi = _TRUE_;
        ppt->has_source_psi = _TRUE_;
      }

      if (ppt->has_velocity_transfers == _TRUE_) {
        ppt->has_lss = _TRUE_;
        ppt->has_source_theta_g = _TRUE_;
        ppt->has_source_theta_b = _TRUE_;
        /* in synchronous gauge the cdm velocity vanishes by gauge
           choice, hence theta_cdm is only stored in other gauges */
        if ((pba->has_cdm == _TRUE_) && (ppt->gauge != synchronous))
          ppt->has_source_theta_cdm = _TRUE_;
        if (pba->has_dcdm == _TRUE_)
          ppt->has_source_theta_dcdm = _TRUE_;
        if (pba->has_fld == _TRUE_)
          ppt->has_source_theta_fld = _TRUE_;
        if (pba->has_scf == _TRUE_)
          ppt->has_source_theta_scf = _TRUE_;
        if (pba->has_ur == _TRUE_)
          ppt->has_source_theta_ur = _TRUE_;
        if (pba->has_dr == _TRUE_)
          ppt->has_source_theta_dr = _TRUE_;
        if (pba->has_ncdm == _TRUE_)
          ppt->has_source_theta_ncdm = _TRUE_;
      }

      if (ppt->has_cl_number_count == _TRUE_) {
        ppt->has_lss = _TRUE_;
        if (ppt->has_nc_density == _TRUE_) {
          ppt->has_source_delta_m = _TRUE_;
        }
        if (ppt->has_nc_rsd == _TRUE_) {
          ppt->has_source_theta_m = _TRUE_;
        }
        if (ppt->has_nc_lens == _TRUE_) {
          ppt->has_source_phi_plus_psi = _TRUE_;
        }
        if (ppt->has_nc_gr == _TRUE_) {
          ppt->has_source_phi = _TRUE_;
          ppt->has_source_psi = _TRUE_;
          ppt->has_source_phi_prime = _TRUE_;
          ppt->has_source_phi_plus_psi = _TRUE_;
        }
      }

      /* scalar-specific type indices, counted after the common ones;
         call order fixes the index values */
      index_type = index_type_common;
      class_define_index(ppt->index_tp_t0,          ppt->has_source_t,         index_type,1);
      class_define_index(ppt->index_tp_t1,          ppt->has_source_t,         index_type,1);
      class_define_index(ppt->index_tp_delta_m,     ppt->has_source_delta_m,   index_type,1);
      class_define_index(ppt->index_tp_delta_g,     ppt->has_source_delta_g,   index_type,1);
      class_define_index(ppt->index_tp_delta_b,     ppt->has_source_delta_b,   index_type,1);
      class_define_index(ppt->index_tp_delta_cdm,   ppt->has_source_delta_cdm, index_type,1);
      class_define_index(ppt->index_tp_delta_dcdm,  ppt->has_source_delta_dcdm,index_type,1);
      class_define_index(ppt->index_tp_delta_fld,   ppt->has_source_delta_fld, index_type,1);
      class_define_index(ppt->index_tp_delta_scf,   ppt->has_source_delta_scf, index_type,1);
      class_define_index(ppt->index_tp_delta_dr,    ppt->has_source_delta_dr,  index_type,1);
      class_define_index(ppt->index_tp_delta_ur,    ppt->has_source_delta_ur,  index_type,1);
      class_define_index(ppt->index_tp_delta_ncdm1, ppt->has_source_delta_ncdm,index_type,pba->N_ncdm);
      class_define_index(ppt->index_tp_theta_m,     ppt->has_source_theta_m,   index_type,1);
      class_define_index(ppt->index_tp_theta_g,     ppt->has_source_theta_g,   index_type,1);
      class_define_index(ppt->index_tp_theta_b,     ppt->has_source_theta_b,   index_type,1);
      class_define_index(ppt->index_tp_theta_cdm,   ppt->has_source_theta_cdm, index_type,1);
      class_define_index(ppt->index_tp_theta_dcdm,  ppt->has_source_theta_dcdm,index_type,1);
      class_define_index(ppt->index_tp_theta_fld,   ppt->has_source_theta_fld, index_type,1);
      class_define_index(ppt->index_tp_theta_scf,   ppt->has_source_theta_scf, index_type,1);
      class_define_index(ppt->index_tp_theta_dr,    ppt->has_source_theta_dr,  index_type,1);
      class_define_index(ppt->index_tp_theta_ur,    ppt->has_source_theta_ur,  index_type,1);
      class_define_index(ppt->index_tp_theta_ncdm1, ppt->has_source_theta_ncdm,index_type,pba->N_ncdm);
      class_define_index(ppt->index_tp_phi,         ppt->has_source_phi,       index_type,1);
      class_define_index(ppt->index_tp_phi_prime,   ppt->has_source_phi_prime, index_type,1);
      class_define_index(ppt->index_tp_phi_plus_psi,ppt->has_source_phi_plus_psi,index_type,1);
      class_define_index(ppt->index_tp_psi,         ppt->has_source_psi,       index_type,1);
      ppt->tp_size[index_md] = index_type;

      class_test(index_type == 0,
                 ppt->error_message,
                 "inconsistent input: you asked for scalars, so you should have at least one non-zero scalar source type (temperature, polarization, lensing/gravitational potential, ...). Please adjust your input.");

      /** - --> count scalar initial conditions (for scalars: ad, cdi, nid, niv; for tensors: only one) and assign corresponding indices */

      index_ic = 0;
      class_define_index(ppt->index_ic_ad, ppt->has_ad, index_ic,1);
      class_define_index(ppt->index_ic_bi, ppt->has_bi, index_ic,1);
      class_define_index(ppt->index_ic_cdi,ppt->has_cdi,index_ic,1);
      class_define_index(ppt->index_ic_nid,ppt->has_nid,index_ic,1);
      class_define_index(ppt->index_ic_niv,ppt->has_niv,index_ic,1);
      ppt->ic_size[index_md] = index_ic;

      class_test(index_ic == 0,
                 ppt->error_message,
                 "you should have at least one adiabatic or isocurvature initial condition...} !!!");

    }

    /** - (b) vectors */

    if (_vectors_) {

      /** - --> source flags and indices, for sources that are specific to vectors */

      index_type = index_type_common;
      class_define_index(ppt->index_tp_t1,ppt->has_source_t,index_type,1);
      ppt->tp_size[index_md] = index_type;

      /*
        class_test(index_type == 0,
        ppt->error_message,
        "inconsistent input: you asked for vectors, so you should have at least one non-zero vector source type (temperature or polarization). Please adjust your input.");
      */

      /** - --> initial conditions for vectors*/

      index_ic = 0;
      /* not coded yet */
      ppt->ic_size[index_md] = index_ic;

    }

    /** - (c) tensors */

    if (_tensors_) {

      /** - --> source flags and indices, for sources that are specific to tensors */

      index_type = index_type_common;
      /* nothing specific, unlike for vectors and scalars! */
      ppt->tp_size[index_md] = index_type;

      /*
        class_test(index_type == 0,
        ppt->error_message,
        "inconsistent input: you asked for tensors, so you should have at least one non-zero tensor source type (temperature or polarization). Please adjust your input.");
      */

      /** - --> only one initial condition for tensors*/

      index_ic = 0;
      class_define_index(ppt->index_ic_ten,_TRUE_,index_ic,1);
      ppt->ic_size[index_md] = index_ic;

    }

    /** - (d) for each mode, allocate array of arrays of source functions for each initial conditions and wavenumber, (ppt->source[index_md])[index_ic][index_type] */

    class_alloc(ppt->sources[index_md],
                ppt->ic_size[index_md] * ppt->tp_size[index_md] * sizeof(double *),
                ppt->error_message);

  }

  return _SUCCESS_;

}

/**
 * Define time sampling for source functions.
 *
 * For each type, compute the list of values of tau at which sources
 * will be sampled. Knowing the number of tau values, allocate all
 * arrays of source functions.
 *
 * @param ppr Input: pointer to precision structure
 * @param pba Input: pointer to background structure
 * @param pth Input: pointer to thermodynamics structure
 * @param ppt Input/Output: Initialized perturbation structure
 * @return the error status
 */

int perturb_timesampling_for_sources(
                                     struct precision * ppr,
                                     struct background * pba,
                                     struct thermo * pth,
                                     struct perturbs * ppt
                                     ) {

  /** Summary: */

  /** - define local variables */

  int counter;
  int index_md;
  int index_type;
  int index_ic;
  int last_index_back;
  int last_index_thermo;
  int first_index_back;
  int first_index_thermo;

  double tau;
  double tau_ini;
  double tau_lower;
  double tau_upper;
  double tau_mid;
  double timescale_source;
  double rate_thermo;
  double rate_isw_squared;
  double a_prime_over_a;
  double a_primeprime_over_a;
  double * pvecback;
  double * pvecthermo;

  /** - allocate background/thermodynamics vectors */

  class_alloc(pvecback,pba->bg_size_short*sizeof(double),ppt->error_message);
  class_alloc(pvecthermo,pth->th_size*sizeof(double),ppt->error_message);

  /** - first, just count the number of sampling points in order to allocate the array containing all values */

  /** - (a) if CMB requested, first sampling point = when the universe
      stops being opaque; otherwise, start sampling gravitational
      potential at recombination [however, if perturbed recombination
      is requested, we also need to start the system before
      recombination. Otherwise, the initial conditions for gas
      temperature and ionization fraction perturbations (delta_T = 1/3
      delta_b, delta_x_e) are not valid]. */

  if ((ppt->has_cmb == _TRUE_)||(ppt->has_perturbed_recombination == _TRUE_)) {

    /* using bisection, search time tau such that the ratio of thermo
       to Hubble time scales tau_c/tau_h=aH/kappa' is equal to
       start_sources_at_tau_c_over_tau_h */

    tau_lower = pth->tau_ini;

    class_call(background_at_tau(pba,
                                 tau_lower,
                                 pba->short_info,
                                 pba->inter_normal,
                                 &first_index_back,
                                 pvecback),
               pba->error_message,
               ppt->error_message);

    class_call(thermodynamics_at_z(pba,
                                   pth,
                                   1./pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                   pth->inter_normal,
                                   &first_index_thermo,
                                   pvecback,
                                   pvecthermo),
               pth->error_message,
               ppt->error_message);

    class_test(pvecback[pba->index_bg_a]*
               pvecback[pba->index_bg_H]/
               pvecthermo[pth->index_th_dkappa] >
               ppr->start_sources_at_tau_c_over_tau_h,
               ppt->error_message,
               "your choice of initial time for computing sources is inappropriate: it corresponds to an earlier time than the one at which the integration of thermodynamical variables started (tau=%g). You should increase either 'start_sources_at_tau_c_over_tau_h' or 'recfast_z_initial'\n",
               tau_lower);

    tau_upper = pth->tau_rec;

    class_call(background_at_tau(pba,
                                 tau_upper,
                                 pba->short_info,
                                 pba->inter_normal,
                                 &first_index_back,
                                 pvecback),
               pba->error_message,
               ppt->error_message);

    class_call(thermodynamics_at_z(pba,
                                   pth,
                                   1./pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                   pth->inter_normal,
                                   &first_index_thermo,
                                   pvecback,
                                   pvecthermo),
               pth->error_message,
               ppt->error_message);

    class_test(pvecback[pba->index_bg_a]*
               pvecback[pba->index_bg_H]/
               pvecthermo[pth->index_th_dkappa] <
               ppr->start_sources_at_tau_c_over_tau_h,
               ppt->error_message,
               "your choice of initial time for computing sources is inappropriate: it corresponds to a time after recombination. You should decrease 'start_sources_at_tau_c_over_tau_h'\n");

    tau_mid = 0.5*(tau_lower + tau_upper);

    while (tau_upper - tau_lower > ppr->tol_tau_approx) {

      class_call(background_at_tau(pba,
                                   tau_mid,
                                   pba->short_info,
                                   pba->inter_normal,
                                   &first_index_back,
                                   pvecback),
                 pba->error_message,
                 ppt->error_message);

      class_call(thermodynamics_at_z(pba,
                                     pth,
                                     1./pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                     pth->inter_normal,
                                     &first_index_thermo,
                                     pvecback,
                                     pvecthermo),
                 pth->error_message,
                 ppt->error_message);

      if (pvecback[pba->index_bg_a]*
          pvecback[pba->index_bg_H]/
          pvecthermo[pth->index_th_dkappa] >
          ppr->start_sources_at_tau_c_over_tau_h)

        tau_upper = tau_mid;
      else
        tau_lower = tau_mid;

      tau_mid = 0.5*(tau_lower + tau_upper);

    }

    tau_ini = tau_mid;

  }
  else {

    /* check the time corresponding to the highest redshift requested in output plus one */
    class_call(background_tau_of_z(pba,
                                   ppt->z_max_pk+1,
                                   &tau_ini),
               pba->error_message,
               ppt->error_message);

    /* obsolete: previous choice was to start always at recombination time */
    /* tau_ini = pth->tau_rec; */

    /* set values of first_index_back/thermo */
    class_call(background_at_tau(pba,
                                 tau_ini,
                                 pba->short_info,
                                 pba->inter_normal,
                                 &first_index_back,
                                 pvecback),
               pba->error_message,
               ppt->error_message);

    class_call(thermodynamics_at_z(pba,
                                   pth,
                                   1./pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                   pth->inter_normal,
                                   &first_index_thermo,
                                   pvecback,
                                   pvecthermo),
               pth->error_message,
               ppt->error_message);

  }

  /** - (b) next sampling point = previous + ppr->perturb_sampling_stepsize * timescale_source, where:
      - --> if CMB requested:
      timescale_source1 = \f$ |g/\dot{g}| = |\dot{\kappa}-\ddot{\kappa}/\dot{\kappa}|^{-1} \f$;
      timescale_source2 = \f$ |2\ddot{a}/a-(\dot{a}/a)^2|^{-1/2} \f$ (to sample correctly the late ISW effect; and
      timescale_source=1/(1/timescale_source1+1/timescale_source2); repeat till today.
      - --> if CMB not requested:
      timescale_source = 1/aH; repeat till today. */

  /* counter starts at 1 because tau_ini itself is the first sample */
  counter = 1;
  last_index_back = first_index_back;
  last_index_thermo = first_index_thermo;
  tau = tau_ini;

  while (tau < pba->conformal_age) {

    class_call(background_at_tau(pba,
                                 tau,
                                 pba->short_info,
                                 pba->inter_closeby,
                                 &last_index_back,
                                 pvecback),
               pba->error_message,
               ppt->error_message);

    class_call(thermodynamics_at_z(pba,
                                   pth,
                                   1./pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                   pth->inter_closeby,
                                   &last_index_thermo,
                                   pvecback,
                                   pvecthermo),
               pth->error_message,
               ppt->error_message);

    if (ppt->has_cmb == _TRUE_) {

      /* variation rate of thermodynamics variables */
      rate_thermo = pvecthermo[pth->index_th_rate];

      /* variation rate of metric due to late ISW effect (important at late times) */
      a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];
      a_primeprime_over_a = pvecback[pba->index_bg_H_prime] * pvecback[pba->index_bg_a]
        + 2. * a_prime_over_a * a_prime_over_a;
      rate_isw_squared = fabs(2.*a_primeprime_over_a-a_prime_over_a*a_prime_over_a);

      /* compute rate */
      timescale_source = sqrt(rate_thermo*rate_thermo+rate_isw_squared);
    }
    else {
      /* variation rate given by Hubble time */
      a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];

      timescale_source = a_prime_over_a;
    }

    /* check it is non-zero */
    class_test(timescale_source == 0.,
               ppt->error_message,
               "null evolution rate, integration is diverging");

    /* compute inverse rate */
    timescale_source = 1./timescale_source;

    class_test(fabs(ppr->perturb_sampling_stepsize*timescale_source/tau) < ppr->smallest_allowed_variation,
               ppt->error_message,
               "integration step =%e < machine precision : leads either to numerical error or infinite loop",ppr->perturb_sampling_stepsize*timescale_source);

    tau = tau + ppr->perturb_sampling_stepsize*timescale_source;
    counter++;

  }

  /** - --> infer total number of time steps, ppt->tau_size */

  ppt->tau_size = counter;

  /** - --> allocate array of time steps, ppt->tau_sampling[index_tau] */

  class_alloc(ppt->tau_sampling,ppt->tau_size * sizeof(double),ppt->error_message);

  /** - --> repeat the same steps, now filling the array with each tau value: */

  /** - --> (b.1.) first sampling point = when the universe stops being opaque */

  counter = 0;
  ppt->tau_sampling[counter]=tau_ini;

  /** - --> (b.2.) next sampling point = previous + ppr->perturb_sampling_stepsize * timescale_source, where
      timescale_source1 = \f$ |g/\dot{g}| = |\dot{\kappa}-\ddot{\kappa}/\dot{\kappa}|^{-1} \f$;
      timescale_source2 = \f$ |2\ddot{a}/a-(\dot{a}/a)^2|^{-1/2} \f$ (to sample correctly the late ISW effect; and
      timescale_source=1/(1/timescale_source1+1/timescale_source2); repeat till today.
      If CMB not requested:
      timescale_source = 1/aH; repeat till today. */

  last_index_back = first_index_back;
  last_index_thermo = first_index_thermo;
  tau = tau_ini;

  while (tau < pba->conformal_age) {

    class_call(background_at_tau(pba,
                                 tau,
                                 pba->short_info,
                                 pba->inter_closeby,
                                 &last_index_back,
                                 pvecback),
               pba->error_message,
               ppt->error_message);

    class_call(thermodynamics_at_z(pba,
                                   pth,
                                   1./pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                   pth->inter_closeby,
                                   &last_index_thermo,
                                   pvecback,
                                   pvecthermo),
               pth->error_message,
               ppt->error_message);

    if (ppt->has_cmb == _TRUE_) {

      /* variation rate of thermodynamics variables */
      rate_thermo = pvecthermo[pth->index_th_rate];

      /* variation rate of metric due to late ISW effect (important at late times) */
      a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];
      a_primeprime_over_a = pvecback[pba->index_bg_H_prime] * pvecback[pba->index_bg_a]
        + 2. * a_prime_over_a * a_prime_over_a;
      rate_isw_squared = fabs(2.*a_primeprime_over_a-a_prime_over_a*a_prime_over_a);

      /* compute rate */
      timescale_source = sqrt(rate_thermo*rate_thermo+rate_isw_squared);
    }
    else {
      a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];
      timescale_source = a_prime_over_a;
    }

    /* check it is non-zero */
    class_test(timescale_source == 0.,
               ppt->error_message,
               "null evolution rate, integration is diverging");

    /* compute inverse rate */
    timescale_source = 1./timescale_source;

    class_test(fabs(ppr->perturb_sampling_stepsize*timescale_source/tau) < ppr->smallest_allowed_variation,
               ppt->error_message,
               "integration step =%e < machine precision : leads either to numerical error or infinite loop",ppr->perturb_sampling_stepsize*timescale_source);

    tau = tau + ppr->perturb_sampling_stepsize*timescale_source;
    counter++;
    ppt->tau_sampling[counter]=tau;

  }

  /** - last sampling point = exactly today */
  /* the final loop iteration overshoots today; overwrite that last
     entry with the exact conformal age */
  ppt->tau_sampling[counter] = pba->conformal_age;

  free(pvecback);
  free(pvecthermo);

  /** - loop over modes, initial conditions and types. For each of
      them, allocate array of source functions. */

  for (index_md = 0; index_md < ppt->md_size; index_md++) {
    for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) {
      for (index_type = 0; index_type < ppt->tp_size[index_md]; index_type++) {

        class_alloc(ppt->sources[index_md][index_ic*ppt->tp_size[index_md]+index_type],
                    ppt->k_size[index_md] * ppt->tau_size * sizeof(double),
                    ppt->error_message);

      }
    }
  }

  return _SUCCESS_;

}

/**
 * Define the number of comoving wavenumbers using the information
 * passed in the precision structure.
* * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param pth Input: pointer to thermodynamics structure * @param ppt Input: pointer to perturbation structure * @return the error status */ int perturb_get_k_list( struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt ) { int index_k, index_k_output, index_mode; double k,k_min=0.,k_rec,step,tau1; double * k_max_cmb; double * k_max_cl; double k_max=0.; double scale2; double *tmp_k_list; int newk_size, index_newk, add_k_output_value; /** Summary: */ class_test(ppr->k_step_transition == 0., ppt->error_message, "stop to avoid division by zero"); class_test(pth->rs_rec == 0., ppt->error_message, "stop to avoid division by zero"); /** - allocate arrays related to k list for each mode */ class_alloc(ppt->k_size_cmb, ppt->md_size*sizeof(int), ppt->error_message); class_alloc(ppt->k_size_cl, ppt->md_size*sizeof(int), ppt->error_message); class_alloc(ppt->k_size, ppt->md_size*sizeof(int), ppt->error_message); class_alloc(ppt->k, ppt->md_size*sizeof(double*), ppt->error_message); class_calloc(k_max_cmb, ppt->md_size, sizeof(double), ppt->error_message); class_calloc(k_max_cl, ppt->md_size, sizeof(double), ppt->error_message); /** - scalar modes */ if (ppt->has_scalars == _TRUE_) { /* first value */ if (pba->sgnK == 0) { /* K<0 (flat) : start close to zero */ k_min=ppr->k_min_tau0/pba->conformal_age; } else if (pba->sgnK == -1) { /* K<0 (open) : start close to sqrt(-K) (in transfer modules, for scalars, this will correspond to q close to zero; for vectors and tensors, this value is even smaller than the minimum necessary value) */ k_min=sqrt(-pba->K+pow(ppr->k_min_tau0/pba->conformal_age/pth->angular_rescaling,2)); } else if (pba->sgnK == 1) { /* K>0 (closed): start from q=sqrt(k2+(1+m)K) equal to 3sqrt(K), i.e. 
k=sqrt((8-m)K) */ k_min = sqrt((8.-1.e-4)*pba->K); } /** - --> find k_max (as well as k_max_cmb[ppt->index_md_scalars], k_max_cl[ppt->index_md_scalars]) */ k_rec = 2. * _PI_ / pth->rs_rec; /* comoving scale corresponding to sound horizon at recombination */ k_max_cmb[ppt->index_md_scalars] = k_min; k_max_cl[ppt->index_md_scalars] = k_min; k_max = k_min; if (ppt->has_cls == _TRUE_) { /* find k_max_cmb[ppt->index_md_scalars] : */ /* choose a k_max_cmb[ppt->index_md_scalars] corresponding to a wavelength on the last scattering surface seen today under an angle smaller than pi/lmax: this is equivalent to k_max_cl[ppt->index_md_scalars]*[comvoving.ang.diameter.distance] > l_max */ k_max_cmb[ppt->index_md_scalars] = ppr->k_max_tau0_over_l_max*ppt->l_scalar_max /pba->conformal_age/pth->angular_rescaling; k_max_cl[ppt->index_md_scalars] = k_max_cmb[ppt->index_md_scalars]; k_max = k_max_cmb[ppt->index_md_scalars]; /* find k_max_cl[ppt->index_md_scalars] : */ /* if we need density/lensing Cl's, we must impose a stronger condition, such that the minimum wavelength on the shell corresponding to the center of smallest redshift bin is seen under an angle smaller than pi/lmax. So we must multiply our previous k_max_cl[ppt->index_md_scalars] by the ratio tau0/(tau0-tau[center of smallest redshift bin]). Note that we could do the same with the lensing potential if we needed a very precise C_l^phi-phi at large l. We don't do it by default, because the lensed ClT, ClE would be marginally affected. 
*/ if ((ppt->has_cl_number_count == _TRUE_) || (ppt->has_cl_lensing_potential == _TRUE_)) { class_call(background_tau_of_z(pba, ppt->selection_mean[0], &tau1), pba->error_message, ppt->error_message); k_max_cl[ppt->index_md_scalars] = MAX(k_max_cl[ppt->index_md_scalars],ppr->k_max_tau0_over_l_max*ppt->l_lss_max/(pba->conformal_age-tau1)); // to be very accurate we should use angular diameter distance to given redshift instead of comoving radius: would implement corrections depending on curvature k_max = k_max_cl[ppt->index_md_scalars]; } } /* find k_max: */ if ((ppt->has_pk_matter == _TRUE_) || (ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_)) k_max = MAX(k_max,ppt->k_max_for_pk); if (ppt->has_nl_corrections_based_on_delta_m == _TRUE_) k_max = MAX(k_max,ppr->halofit_min_k_max); /** - --> test that result for k_min, k_max make sense */ class_test(k_min<0., ppt->error_message, "buggy definition of k_min"); class_test(k_max<0., ppt->error_message, "buggy definition of k_max"); class_test(k_max<k_min, ppt->error_message, "buggy definition of k_min and/or k_max"); /* if K>0, the transfer function will be calculated for discrete integer values of nu=3,4,5,... where nu=sqrt(k2+(1+m)K) and m=0,1,2 for scalars/vectors/tensors. However we are free to define in the perturbation module some arbitrary values of k: later on, the transfer module will interpolate at values of k corresponding exactly to integer values of nu. 
Hence, apart from the value of k_min and the step size in the vicinity of k_min, we define exactly the same sampling in the three cases K=0, K<0, K>0 */ /* allocate array with, for the moment, the largest possible size */ class_alloc(ppt->k[ppt->index_md_scalars], ((int)((k_max_cmb[ppt->index_md_scalars]-k_min)/k_rec/MIN(ppr->k_step_super,ppr->k_step_sub))+ (int)(MAX(ppr->k_per_decade_for_pk,ppr->k_per_decade_for_bao)*log(k_max/k_min)/log(10.))+3) *sizeof(double),ppt->error_message); /* first value */ index_k=0; k = k_min; ppt->k[ppt->index_md_scalars][index_k] = k; index_k++; /* values until k_max_cmb[ppt->index_md_scalars] */ while (k < k_max_cmb[ppt->index_md_scalars]) { /* the linear step is not constant, it has a step-like shape, centered around the characteristic scale set by the sound horizon at recombination (associated to the comoving wavenumber k_rec) */ step = (ppr->k_step_super + 0.5 * (tanh((k-k_rec)/k_rec/ppr->k_step_transition)+1.) * (ppr->k_step_sub-ppr->k_step_super)) * k_rec; /* there is one other thing to take into account in the step size. There are two other characteristic scales that matter for the sampling: the Hubble scale today, k0=a0H0, and eventually curvature scale sqrt(|K|). We define "scale2" as the sum of the squared Hubble radius and squared curvature radius. We need to increase the sampling for k<sqrt(scale2), in order to get the first mutipoles accurate enough. The formula below reduces it gradually in the k-->0 limit, by up to a factor 10. The actual stepsize is still fixed by k_step_super, this is just a reduction factor. 
*/ scale2 = pow(pba->a_today*pba->H0,2)+fabs(pba->K); step *= (k*k/scale2+1.)/(k*k/scale2+1./ppr->k_step_super_reduction); class_test(step / k < ppr->smallest_allowed_variation, ppt->error_message, "k step =%e < machine precision : leads either to numerical error or infinite loop", step * k_rec); k += step; class_test(k <= ppt->k[ppt->index_md_scalars][index_k-1], ppt->error_message, "consecutive values of k should differ and should be in growing order"); ppt->k[ppt->index_md_scalars][index_k] = k; index_k++; } ppt->k_size_cmb[ppt->index_md_scalars] = index_k; /* values until k_max_cl[ppt->index_md_scalars] */ while (k < k_max_cl[ppt->index_md_scalars]) { k *= pow(10.,1./(ppr->k_per_decade_for_pk +(ppr->k_per_decade_for_bao-ppr->k_per_decade_for_pk) *(1.-tanh(pow((log(k)-log(ppr->k_bao_center*k_rec))/log(ppr->k_bao_width),4))))); ppt->k[ppt->index_md_scalars][index_k] = k; index_k++; } ppt->k_size_cl[ppt->index_md_scalars] = index_k; /* values until k_max */ while (k < k_max) { k *= pow(10.,1./(ppr->k_per_decade_for_pk +(ppr->k_per_decade_for_bao-ppr->k_per_decade_for_pk) *(1.-tanh(pow((log(k)-log(ppr->k_bao_center*k_rec))/log(ppr->k_bao_width),4))))); ppt->k[ppt->index_md_scalars][index_k] = k; index_k++; } ppt->k_size[ppt->index_md_scalars] = index_k; class_realloc(ppt->k[ppt->index_md_scalars], ppt->k[ppt->index_md_scalars], ppt->k_size[ppt->index_md_scalars]*sizeof(double), ppt->error_message); } /** - vector modes */ if (ppt->has_vectors == _TRUE_) { /* first value */ if (pba->sgnK == 0) { /* K<0 (flat) : start close to zero */ k_min=ppr->k_min_tau0/pba->conformal_age; } else if (pba->sgnK == -1) { /* K<0 (open) : start close to sqrt(-K) (in transfer modules, for scalars, this will correspond to q close to zero; for vectors and tensors, this value is even smaller than the minimum necessary value) */ k_min=sqrt(-pba->K+pow(ppr->k_min_tau0/pba->conformal_age/pth->angular_rescaling,2)); } else if (pba->sgnK == 1) { /* K>0 (closed): start from q=sqrt(k2+(1+m)K) 
equal to 3sqrt(K), i.e. k=sqrt((8-m)K) */ k_min = sqrt((7.-1.e-4)*pba->K); } /** - --> find k_max (as well as k_max_cmb[ppt->index_md_vectors], k_max_cl[ppt->index_md_vectors]) */ k_rec = 2. * _PI_ / pth->rs_rec; /* comoving scale corresponding to sound horizon at recombination */ k_max_cmb[ppt->index_md_vectors] = k_min; k_max_cl[ppt->index_md_vectors] = k_min; k_max = k_min; if (ppt->has_cls == _TRUE_) { /* find k_max_cmb: */ /* choose a k_max_cmb corresponding to a wavelength on the last scattering surface seen today under an angle smaller than pi/lmax: this is equivalent to k_max_cl*[comvoving.ang.diameter.distance] > l_max */ k_max_cmb[ppt->index_md_vectors] = ppr->k_max_tau0_over_l_max*ppt->l_vector_max /pba->conformal_age/pth->angular_rescaling; k_max_cl[ppt->index_md_vectors] = k_max_cmb[ppt->index_md_vectors]; k_max = k_max_cmb[ppt->index_md_vectors]; } /** - --> test that result for k_min, k_max make sense */ class_test(k_min<0., ppt->error_message, "buggy definition of k_min"); class_test(k_max<0., ppt->error_message, "buggy definition of k_max"); class_test(k_max<k_min, ppt->error_message, "buggy definition of k_min and/or k_max"); /* if K>0, the transfer function will be calculated for discrete integer values of nu=3,4,5,... where nu=sqrt(k2+(1+m)K) and m=0,1,2 for scalars/vectors/tensors. However we are free to define in the perturbation module some arbitrary values of k: later on, the transfer module will interpolate at values of k corresponding exactly to integer values of nu. 
Hence, apart from the value of k_min and the step size in the vicinity of k_min, we define exactly the same sampling in the three cases K=0, K<0, K>0 */ /* allocate array with, for the moment, the largest possible size */ class_alloc(ppt->k[ppt->index_md_vectors], ((int)((k_max_cmb[ppt->index_md_vectors]-k_min)/k_rec/MIN(ppr->k_step_super,ppr->k_step_sub))+1) *sizeof(double),ppt->error_message); /* first value */ index_k=0; k = k_min; ppt->k[ppt->index_md_vectors][index_k] = k; index_k++; /* values until k_max_cmb[ppt->index_md_vectors] */ while (k < k_max_cmb[ppt->index_md_vectors]) { /* the linear step is not constant, it has a step-like shape, centered around the characteristic scale set by the sound horizon at recombination (associated to the comoving wavenumber k_rec) */ step = (ppr->k_step_super + 0.5 * (tanh((k-k_rec)/k_rec/ppr->k_step_transition)+1.) * (ppr->k_step_sub-ppr->k_step_super)) * k_rec; /* there is one other thing to take into account in the step size. There are two other characteristic scales that matter for the sampling: the Hubble scale today, k0=a0H0, and eventually curvature scale sqrt(|K|). We define "scale2" as the sum of the squared Hubble radius and squared curvature radius. We need to increase the sampling for k<sqrt(scale2), in order to get the first mutipoles accurate enough. The formula below reduces it gradually in the k-->0 limit, by up to a factor 10. The actual stepsize is still fixed by k_step_super, this is just a reduction factor. 
*/ scale2 = pow(pba->a_today*pba->H0,2)+fabs(pba->K); step *= (k*k/scale2+1.)/(k*k/scale2+1./ppr->k_step_super_reduction); class_test(step / k < ppr->smallest_allowed_variation, ppt->error_message, "k step =%e < machine precision : leads either to numerical error or infinite loop", step * k_rec); k += step; class_test(k <= ppt->k[ppt->index_md_scalars][index_k-1], ppt->error_message, "consecutive values of k should differ and should be in growing order"); ppt->k[ppt->index_md_vectors][index_k] = k; index_k++; } ppt->k_size_cmb[ppt->index_md_vectors] = index_k; ppt->k_size_cl[ppt->index_md_vectors] = index_k; ppt->k_size[ppt->index_md_vectors] = index_k; class_realloc(ppt->k[ppt->index_md_vectors], ppt->k[ppt->index_md_vectors], ppt->k_size[ppt->index_md_vectors]*sizeof(double), ppt->error_message); } /** - tensor modes */ if (ppt->has_tensors == _TRUE_) { /* first value */ if (pba->sgnK == 0) { /* K<0 (flat) : start close to zero */ k_min=ppr->k_min_tau0/pba->conformal_age; } else if (pba->sgnK == -1) { /* K<0 (open) : start close to sqrt(-K) (in transfer modules, for scalars, this will correspond to q close to zero; for vectors and tensors, this value is even smaller than the minimum necessary value) */ k_min=sqrt(-pba->K+pow(ppr->k_min_tau0/pba->conformal_age/pth->angular_rescaling,2)); } else if (pba->sgnK == 1) { /* K>0 (closed): start from q=sqrt(k2+(1+m)K) equal to 3sqrt(K), i.e. k=sqrt((8-m)K) */ k_min = sqrt((6.-1.e-4)*pba->K); } /** - --> find k_max (as well as k_max_cmb[ppt->index_md_tensors], k_max_cl[ppt->index_md_tensors]) */ k_rec = 2. 
* _PI_ / pth->rs_rec; /* comoving scale corresponding to sound horizon at recombination */ k_max_cmb[ppt->index_md_tensors] = k_min; k_max_cl[ppt->index_md_tensors] = k_min; k_max = k_min; if (ppt->has_cls == _TRUE_) { /* find k_max_cmb[ppt->index_md_tensors]: */ /* choose a k_max_cmb[ppt->index_md_tensors] corresponding to a wavelength on the last scattering surface seen today under an angle smaller than pi/lmax: this is equivalent to k_max_cl[ppt->index_md_tensors]*[comvoving.ang.diameter.distance] > l_max */ k_max_cmb[ppt->index_md_tensors] = ppr->k_max_tau0_over_l_max*ppt->l_tensor_max /pba->conformal_age/pth->angular_rescaling; k_max_cl[ppt->index_md_tensors] = k_max_cmb[ppt->index_md_tensors]; k_max = k_max_cmb[ppt->index_md_tensors]; } /** - --> test that result for k_min, k_max make sense */ class_test(k_min<0., ppt->error_message, "buggy definition of k_min"); class_test(k_max<0., ppt->error_message, "buggy definition of k_max"); class_test(k_max<k_min, ppt->error_message, "buggy definition of k_min and/or k_max"); /* if K>0, the transfer function will be calculated for discrete integer values of nu=3,4,5,... where nu=sqrt(k2+(1+m)K) and m=0,1,2 for scalars/vectors/tensors. However we are free to define in the perturbation module some arbitrary values of k: later on, the transfer module will interpolate at values of k corresponding exactly to integer values of nu. 
Hence, apart from the value of k_min and the step size in the vicinity of k_min, we define exactly the same sampling in the three cases K=0, K<0, K>0 */ /* allocate array with, for the moment, the largest possible size */ class_alloc(ppt->k[ppt->index_md_tensors], ((int)((k_max_cmb[ppt->index_md_tensors]-k_min)/k_rec/MIN(ppr->k_step_super,ppr->k_step_sub))+1) *sizeof(double),ppt->error_message); /* first value */ index_k=0; k = k_min; ppt->k[ppt->index_md_tensors][index_k] = k; index_k++; /* values until k_max_cmb[ppt->index_md_tensors] */ while (k < k_max_cmb[ppt->index_md_tensors]) { /* the linear step is not constant, it has a step-like shape, centered around the characteristic scale set by the sound horizon at recombination (associated to the comoving wavenumber k_rec) */ step = (ppr->k_step_super + 0.5 * (tanh((k-k_rec)/k_rec/ppr->k_step_transition)+1.) * (ppr->k_step_sub-ppr->k_step_super)) * k_rec; /* there is one other thing to take into account in the step size. There are two other characteristic scales that matter for the sampling: the Hubble scale today, k0=a0H0, and eventually curvature scale sqrt(|K|). We define "scale2" as the sum of the squared Hubble radius and squared curvature radius. We need to increase the sampling for k<sqrt(scale2), in order to get the first mutipoles accurate enough. The formula below reduces it gradually in the k-->0 limit, by up to a factor 10. The actual stepsize is still fixed by k_step_super, this is just a reduction factor. 
*/ scale2 = pow(pba->a_today*pba->H0,2)+fabs(pba->K); step *= (k*k/scale2+1.)/(k*k/scale2+1./ppr->k_step_super_reduction); class_test(step / k < ppr->smallest_allowed_variation, ppt->error_message, "k step =%e < machine precision : leads either to numerical error or infinite loop", step * k_rec); k += step; class_test(k <= ppt->k[ppt->index_md_tensors][index_k-1], ppt->error_message, "consecutive values of k should differ and should be in growing order"); ppt->k[ppt->index_md_tensors][index_k] = k; index_k++; } ppt->k_size_cmb[ppt->index_md_tensors] = index_k; ppt->k_size_cl[ppt->index_md_tensors] = index_k; ppt->k_size[ppt->index_md_tensors] = index_k; class_realloc(ppt->k[ppt->index_md_tensors], ppt->k[ppt->index_md_tensors], ppt->k_size[ppt->index_md_tensors]*sizeof(double), ppt->error_message); } /** - If user asked for k_output_values, add those to all k lists: */ if (ppt->k_output_values_num>0){ /* Allocate storage */ class_alloc(ppt->index_k_output_values,sizeof(double)*ppt->md_size*ppt->k_output_values_num,ppt->error_message); /** - --> Find indices in ppt->k[index_md] corresponding to 'k_output_values'. We are assuming that ppt->k is sorted and growing, and we have made sure that ppt->k_output_values is also sorted and growing.*/ for (index_mode=0; index_mode<ppt->md_size; index_mode++){ newk_size = ppt->k_size[index_mode]+ppt->k_output_values_num; class_alloc(tmp_k_list,sizeof(double)*newk_size,ppt->error_message); index_k=0; index_k_output=0; for (index_newk=0; index_newk<newk_size; index_newk++){ /** - --> Decide if we should add k_output_value now. 
This has to be this complicated, since we can only compare the k-values when both indices are in range.*/ if (index_k >= ppt->k_size[index_mode]) add_k_output_value = _TRUE_; else if (index_k_output >= ppt->k_output_values_num) add_k_output_value = _FALSE_; else if (ppt->k_output_values[index_k_output] < ppt->k[index_mode][index_k]) add_k_output_value = _TRUE_; else add_k_output_value = _FALSE_; if (add_k_output_value == _TRUE_){ tmp_k_list[index_newk] = ppt->k_output_values[index_k_output]; ppt->index_k_output_values[index_mode*ppt->k_output_values_num+index_k_output]=index_newk; index_k_output++; } else{ tmp_k_list[index_newk] = ppt->k[index_mode][index_k]; index_k++; } } free(ppt->k[index_mode]); ppt->k[index_mode] = tmp_k_list; ppt->k_size[index_mode] = newk_size; index_k = newk_size-1; while (ppt->k[index_mode][index_k] > k_max_cl[index_mode]) index_k--; ppt->k_size_cl[index_mode] = MIN(index_k+2,ppt->k_size[index_mode]); index_k = newk_size-1; while (ppt->k[index_mode][index_k] > k_max_cmb[index_mode]) index_k--; ppt->k_size_cmb[index_mode] = MIN(index_k+2,ppt->k_size[index_mode]); /** - --> The two MIN statements are here because in a normal run, the cl and cmb arrays contain a single k value larger than their respective k_max. We are mimicking this behavior. 
*/ } } /* For testing, can be useful to print the k list in a file: FILE * out=fopen("output/k","w"); for (index_k=0; index_k < ppt->k_size[0]; index_k++) { fprintf(out,"%e\n",ppt->k[0][index_k],pba->K); } fclose(out); */ /** - finally, find the global k_min and k_max for the ensemble of all modes 9scalars, vectors, tensors) */ ppt->k_min = _HUGE_; ppt->k_max = 0.; if (ppt->has_scalars == _TRUE_) { ppt->k_min = MIN(ppt->k_min,ppt->k[ppt->index_md_scalars][0]); /* first value, inferred from perturbations structure */ ppt->k_max = MAX(ppt->k_max,ppt->k[ppt->index_md_scalars][ppt->k_size[ppt->index_md_scalars]-1]); /* last value, inferred from perturbations structure */ } if (ppt->has_vectors == _TRUE_) { ppt->k_min = MIN(ppt->k_min,ppt->k[ppt->index_md_vectors][0]); /* first value, inferred from perturbations structure */ ppt->k_max = MAX(ppt->k_max,ppt->k[ppt->index_md_vectors][ppt->k_size[ppt->index_md_vectors]-1]); /* last value, inferred from perturbations structure */ } if (ppt->has_tensors == _TRUE_) { ppt->k_min = MIN(ppt->k_min,ppt->k[ppt->index_md_tensors][0]); /* first value, inferred from perturbations structure */ ppt->k_max = MAX(ppt->k_max,ppt->k[ppt->index_md_tensors][ppt->k_size[ppt->index_md_tensors]-1]); /* last value, inferred from perturbations structure */ } free(k_max_cmb); free(k_max_cl); return _SUCCESS_; } /** * Initialize a perturb_workspace structure. All fields are allocated * here, with the exception of the perturb_vector '-->pv' field, which * is allocated separately in perturb_vector_init. We allocate one * such perturb_workspace structure per thread and per mode * (scalar/../tensor). Then, for each thread, all initial conditions * and wavenumbers will use the same workspace. 
 *
 * @param ppr        Input: pointer to precision structure
 * @param pba        Input: pointer to background structure
 * @param pth        Input: pointer to the thermodynamics structure
 * @param ppt        Input: pointer to the perturbation structure
 * @param index_md   Input: index of mode under consideration (scalar/.../tensor)
 * @param ppw        Input/Output: pointer to perturb_workspace structure which fields are allocated or filled here
 * @return the error status
 */

int perturb_workspace_init(
                           struct precision * ppr,
                           struct background * pba,
                           struct thermo * pth,
                           struct perturbs * ppt,
                           int index_md,
                           struct perturb_workspace * ppw
                           ) {

  /** Summary: */

  /** - define local variables */

  int index_mt=0;   /* running index over metric perturbations obeying constraint equations */
  int index_ap;     /* running index over approximation schemes */
  int l;            /* multipole index */

  /** - Compute maximum l_max for any multipole */;

  /* the largest Boltzmann hierarchy truncation among all species
     present in this mode; used below to size the s_l[] array */
  if (_scalars_) {
    ppw->max_l_max = MAX(ppr->l_max_g, ppr->l_max_pol_g);
    if (pba->has_ur == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ur);
    if (pba->has_ncdm == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ncdm);
    if (pba->has_dr == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_dr);
  }

  if (_tensors_) {
    ppw->max_l_max = MAX(ppr->l_max_g_ten, ppr->l_max_pol_g_ten);
    if (pba->has_ur == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ur);
    if (pba->has_ncdm == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ncdm);
  }

  /** - Allocate \f$ s_l\f$[ ] array for freestreaming of multipoles
      (see arXiv:1305.3261) and initialize to 1.0, which is the K=0 value. */
  class_alloc(ppw->s_l, sizeof(double)*(ppw->max_l_max+1),ppt->error_message);
  for (l=0; l<=ppw->max_l_max; l++){
    ppw->s_l[l] = 1.0;
  }

  /** - define indices of metric perturbations obeying constraint
      equations (this can be done once and for all, because the
      vector of metric perturbations is the same whatever the
      approximation scheme, unlike the vector of quantities to be
      integrated, which is allocated separately in
      perturb_vector_init) */

  /* NOTE(review): the order of the class_define_index() calls below
     fixes the memory layout of ppw->pvecmetric — do not reorder. */

  if (_scalars_) {

    /* newtonian gauge */

    if (ppt->gauge == newtonian) {
      class_define_index(ppw->index_mt_psi,_TRUE_,index_mt,1); /* psi */
      class_define_index(ppw->index_mt_phi_prime,_TRUE_,index_mt,1); /* phi' */
    }

    /* synchronous gauge (note that eta is counted in the vector of
       quantities to be integrated, while here we only consider
       quantities obeying to constraint equations) */

    if (ppt->gauge == synchronous) {
      class_define_index(ppw->index_mt_h_prime,_TRUE_,index_mt,1);       /* h' */
      class_define_index(ppw->index_mt_h_prime_prime,_TRUE_,index_mt,1); /* h'' */
      class_define_index(ppw->index_mt_eta_prime,_TRUE_,index_mt,1);     /* eta' */
      class_define_index(ppw->index_mt_alpha,_TRUE_,index_mt,1);         /* alpha = (h' + 6 tau') / (2 k**2) */
      class_define_index(ppw->index_mt_alpha_prime,_TRUE_,index_mt,1);   /* alpha' */
    }
  }

  if (_vectors_) {

    /* newtonian gauge */
    if (ppt->gauge == newtonian) {
      class_define_index(ppw->index_mt_V_prime,_TRUE_,index_mt,1);
    }

    /* synchronous gauge */
    if (ppt->gauge == synchronous) {
      class_define_index(ppw->index_mt_hv_prime_prime,_TRUE_,index_mt,1);
    }
  }

  if (_tensors_) {
    class_define_index(ppw->index_mt_gw_prime_prime,_TRUE_,index_mt,1);
  }

  ppw->mt_size = index_mt;

  /** - allocate some workspace in which we will store temporarily the
      values of background, thermodynamics, metric and source
      quantities at a given time */

  class_alloc(ppw->pvecback,pba->bg_size_normal*sizeof(double),ppt->error_message);
  class_alloc(ppw->pvecthermo,pth->th_size*sizeof(double),ppt->error_message);
  class_alloc(ppw->pvecmetric,ppw->mt_size*sizeof(double),ppt->error_message);

  /** - count number of approximations, initialize their indices, and
      allocate their flags */

  index_ap=0;

  /* tight-coupling and radiation-streaming approximations exist for
     every mode; ufa and ncdmfa are scalar-only */
  class_define_index(ppw->index_ap_tca,_TRUE_,index_ap,1);
  class_define_index(ppw->index_ap_rsa,_TRUE_,index_ap,1);

  if (_scalars_) {
    class_define_index(ppw->index_ap_ufa,pba->has_ur,index_ap,1);
    class_define_index(ppw->index_ap_ncdmfa,pba->has_ncdm,index_ap,1);
  }

  ppw->ap_size=index_ap;

  if (ppw->ap_size > 0)
    class_alloc(ppw->approx,ppw->ap_size*sizeof(int),ppt->error_message);

  /** - For definiteness, initialize approximation flags to arbitrary
      values (correct values are overwritten in
      pertub_find_approximation_switches) */

  if (_scalars_) {
    ppw->approx[ppw->index_ap_tca]=(int)tca_on;
    ppw->approx[ppw->index_ap_rsa]=(int)rsa_off;
    if (pba->has_ur == _TRUE_) {
      ppw->approx[ppw->index_ap_ufa]=(int)ufa_off;
    }
    if (pba->has_ncdm == _TRUE_) {
      ppw->approx[ppw->index_ap_ncdmfa]=(int)ncdmfa_off;
    }
  }

  if (_tensors_) {
    ppw->approx[ppw->index_ap_tca]=(int)tca_on;
    ppw->approx[ppw->index_ap_rsa]=(int)rsa_off;
  }

  /** - allocate fields where some of the perturbations are stored */

  /* per-species ncdm fluid quantities, needed only when transfer
     functions or delta_m sources are requested (freed in
     perturb_workspace_free under the same condition) */
  if (_scalars_) {

    if ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) {

      class_alloc(ppw->delta_ncdm,pba->N_ncdm*sizeof(double),ppt->error_message);
      class_alloc(ppw->theta_ncdm,pba->N_ncdm*sizeof(double),ppt->error_message);
      class_alloc(ppw->shear_ncdm,pba->N_ncdm*sizeof(double),ppt->error_message);
    }
  }

  return _SUCCESS_;
}

/**
 * Free the perturb_workspace structure (with the exception of the
 * perturb_vector '-->pv' field, which is freed separately in
 * perturb_vector_free).
* * @param ppt Input: pointer to the perturbation structure * @param index_md Input: index of mode under consideration (scalar/.../tensor) * @param ppw Input: pointer to perturb_workspace structure to be freed * @return the error status */ int perturb_workspace_free ( struct perturbs * ppt, int index_md, struct perturb_workspace * ppw ) { free(ppw->s_l); free(ppw->pvecback); free(ppw->pvecthermo); free(ppw->pvecmetric); if (ppw->ap_size > 0) free(ppw->approx); if (_scalars_) { if ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) { free(ppw->delta_ncdm); free(ppw->theta_ncdm); free(ppw->shear_ncdm); } } free(ppw); return _SUCCESS_; } /** * Solve the perturbation evolution for a given mode, initial * condition and wavenumber, and compute the corresponding source * functions. * * For a given mode, initial condition and wavenumber, this function * finds the time ranges over which the perturbations can be described * within a given approximation. For each such range, it initializes * (or redistributes) perturbations using perturb_vector_init(), and * integrates over time. Whenever a "source sampling time" is passed, * the source terms are computed and stored in the source table using * perturb_sources(). * * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param pth Input: pointer to the thermodynamics structure * @param ppt Input/Output: pointer to the perturbation structure (output source functions S(k,tau) written here) * @param index_md Input: index of mode under consideration (scalar/.../tensor) * @param index_ic Input: index of initial condition under consideration (ad, iso...) 
* @param index_k Input: index of wavenumber * @param ppw Input: pointer to perturb_workspace structure containing index values and workspaces * @return the error status */ int perturb_solve( struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt, int index_md, int index_ic, int index_k, struct perturb_workspace * ppw ) { /** Summary: */ /** - define local variables */ /* contains all fixed parameters, indices and workspaces used by the perturb_derivs function */ struct perturb_parameters_and_workspace ppaw; /* conformal time */ double tau,tau_lower,tau_upper,tau_mid; /* multipole */ int l; /* index running over time */ int index_tau; /* number of values in the tau_sampling array that should be considered for a given mode */ int tau_actual_size; /* running index over types (temperature, etc) */ int index_type; /* Fourier mode */ double k; /* number of time intervals where the approximation scheme is uniform */ int interval_number; /* index running over such time intervals */ int index_interval; /* number of time intervals where each particular approximation is uniform */ int * interval_number_of; /* edge of intervals where approximation scheme is uniform: tau_ini, tau_switch_1, ..., tau_end */ double * interval_limit; /* array of approximation scheme within each interval: interval_approx[index_interval][index_ap] */ int ** interval_approx; /* index running over approximations */ int index_ap; /* approximation scheme within previous interval: previous_approx[index_ap] */ int * previous_approx; int n_ncdm,is_early_enough; /* function pointer to ODE evolver and names of possible evolvers */ extern int evolver_rk(); extern int evolver_ndf15(); int (*generic_evolver)(); /* Related to the perturbation output */ int (*perhaps_print_variables)(); int index_ikout; /** - initialize indices relevant for back/thermo tables search */ ppw->last_index_back=0; ppw->last_index_thermo=0; ppw->inter_mode = pba->inter_normal; /** - get wavenumber 
value */ k = ppt->k[index_md][index_k]; class_test(k == 0., ppt->error_message, "stop to avoid division by zero"); /** - If non-zero curvature, update array of free-streaming coefficients ppw->s_l */ if (pba->has_curvature == _TRUE_){ for (l = 0; l<=ppw->max_l_max; l++){ ppw->s_l[l] = sqrt(MAX(1.0-pba->K*(l*l-1.0)/k/k,0.)); } } /** - maximum value of tau for which sources are calculated for this wavenumber */ /* by default, today */ tau_actual_size = ppt->tau_size; /** - using bisection, compute minimum value of tau for which this wavenumber is integrated */ /* will be at least the first time in the background table */ tau_lower = pba->tau_table[0]; class_call(background_at_tau(pba, tau_lower, pba->normal_info, pba->inter_normal, &(ppw->last_index_back), ppw->pvecback), pba->error_message, ppt->error_message); class_call(thermodynamics_at_z(pba, pth, 1./ppw->pvecback[pba->index_bg_a]-1., pth->inter_normal, &(ppw->last_index_thermo), ppw->pvecback, ppw->pvecthermo), pth->error_message, ppt->error_message); /* check that this initial time is indeed OK given imposed conditions on kappa' and on k/aH */ class_test(ppw->pvecback[pba->index_bg_a]* ppw->pvecback[pba->index_bg_H]/ ppw->pvecthermo[pth->index_th_dkappa] > ppr->start_small_k_at_tau_c_over_tau_h, ppt->error_message, "your choice of initial time for integrating wavenumbers is inappropriate: it corresponds to a time before that at which the background has been integrated. You should increase 'start_small_k_at_tau_c_over_tau_h' up to at least %g, or decrease 'a_ini_over_a_today_default'\n", ppw->pvecback[pba->index_bg_a]* ppw->pvecback[pba->index_bg_H]/ ppw->pvecthermo[pth->index_th_dkappa]); class_test(k/ppw->pvecback[pba->index_bg_a]/ppw->pvecback[pba->index_bg_H] > ppr->start_large_k_at_tau_h_over_tau_k, ppt->error_message, "your choice of initial time for integrating wavenumbers is inappropriate: it corresponds to a time before that at which the background has been integrated. 
You should increase 'start_large_k_at_tau_h_over_tau_k' up to at least %g, or decrease 'a_ini_over_a_today_default'\n", ppt->k[index_md][ppt->k_size[index_md]-1]/ppw->pvecback[pba->index_bg_a]/ ppw->pvecback[pba->index_bg_H]); if (pba->has_ncdm == _TRUE_) { for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) { class_test(fabs(ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]-1./3.)>ppr->tol_ncdm_initial_w, ppt->error_message, "your choice of initial time for integrating wavenumbers is inappropriate: it corresponds to a time at which the ncdm species number %d is not ultra-relativistic anymore, with w=%g, p=%g and rho=%g\n", n_ncdm, ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm], ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm], ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]); } } /* is at most the time at which sources must be sampled */ tau_upper = ppt->tau_sampling[0]; /* start bisection */ tau_mid = 0.5*(tau_lower + tau_upper); while ((tau_upper - tau_lower)/tau_lower > ppr->tol_tau_approx) { is_early_enough = _TRUE_; class_call(background_at_tau(pba, tau_mid, pba->normal_info, pba->inter_normal, &(ppw->last_index_back), ppw->pvecback), pba->error_message, ppt->error_message); /* if there are non-cold relics, check that they are relativistic enough */ if (pba->has_ncdm == _TRUE_) { for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) { if (fabs(ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]-1./3.) 
> ppr->tol_ncdm_initial_w) is_early_enough = _FALSE_; } } /* also check that the two conditions on (aH/kappa') and (aH/k) are fulfilled */ if (is_early_enough == _TRUE_) { class_call(thermodynamics_at_z(pba, pth, 1./ppw->pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */ pth->inter_normal, &(ppw->last_index_thermo), ppw->pvecback, ppw->pvecthermo), pth->error_message, ppt->error_message); if ((ppw->pvecback[pba->index_bg_a]* ppw->pvecback[pba->index_bg_H]/ ppw->pvecthermo[pth->index_th_dkappa] > ppr->start_small_k_at_tau_c_over_tau_h) || (k/ppw->pvecback[pba->index_bg_a]/ppw->pvecback[pba->index_bg_H] > ppr->start_large_k_at_tau_h_over_tau_k)) is_early_enough = _FALSE_; } if (is_early_enough == _TRUE_) tau_lower = tau_mid; else tau_upper = tau_mid; tau_mid = 0.5*(tau_lower + tau_upper); } tau = tau_mid; /** - find the number of intervals over which approximation scheme is constant */ class_alloc(interval_number_of,ppw->ap_size*sizeof(int),ppt->error_message); ppw->inter_mode = pba->inter_normal; class_call(perturb_find_approximation_number(ppr, pba, pth, ppt, index_md, k, ppw, tau, ppt->tau_sampling[tau_actual_size-1], &interval_number, interval_number_of), ppt->error_message, ppt->error_message); class_alloc(interval_limit,(interval_number+1)*sizeof(double),ppt->error_message); class_alloc(interval_approx,interval_number*sizeof(int*),ppt->error_message); for (index_interval=0; index_interval<interval_number; index_interval++) class_alloc(interval_approx[index_interval],ppw->ap_size*sizeof(int),ppt->error_message); class_call(perturb_find_approximation_switches(ppr, pba, pth, ppt, index_md, k, ppw, tau, ppt->tau_sampling[tau_actual_size-1], ppr->tol_tau_approx, interval_number, interval_number_of, interval_limit, interval_approx), ppt->error_message, ppt->error_message); free(interval_number_of); /** - fill the structure containing all fixed parameters, indices and workspaces needed by perturb_derivs */ ppaw.ppr = ppr; ppaw.pba = pba; ppaw.pth = pth; ppaw.ppt = 
ppt; ppaw.index_md = index_md; ppaw.index_ic = index_ic; ppaw.index_k = index_k; ppaw.k = k; ppaw.ppw = ppw; ppaw.ppw->inter_mode = pba->inter_closeby; ppaw.ppw->last_index_back = 0; ppaw.ppw->last_index_thermo = 0; /** - check whether we need to print perturbations to a file for this wavenumber */ perhaps_print_variables = NULL; ppw->index_ikout = -1; for (index_ikout=0; index_ikout<ppt->k_output_values_num; index_ikout++){ if (ppt->index_k_output_values[index_md*ppt->k_output_values_num+index_ikout] == index_k){ ppw->index_ikout = index_ikout; perhaps_print_variables = perturb_print_variables; /* class_call(perturb_prepare_output_file( pba,ppt,ppw,index_ikout,index_md), ppt->error_message, ppt->error_message); */ } } /** - loop over intervals over which approximation scheme is uniform. For each interval: */ for (index_interval=0; index_interval<interval_number; index_interval++) { /** - --> (a) fix the approximation scheme */ for (index_ap=0; index_ap<ppw->ap_size; index_ap++) ppw->approx[index_ap]=interval_approx[index_interval][index_ap]; /** - --> (b) get the previous approximation scheme. If the current interval starts from the initial time tau_ini, the previous approximation is set to be a NULL pointer, so that the function perturb_vector_init() knows that perturbations must be initialized */ if (index_interval==0) { previous_approx=NULL; } else { previous_approx=interval_approx[index_interval-1]; } /** - --> (c) define the vector of perturbations to be integrated over. If the current interval starts from the initial time tau_ini, fill the vector with initial conditions for each mode. If it starts from an approximation switching point, redistribute correctly the perturbations from the previous to the new vector of perturbations. 
*/
    class_call(perturb_vector_init(ppr,
                                   pba,
                                   pth,
                                   ppt,
                                   index_md,
                                   index_ic,
                                   k,
                                   interval_limit[index_interval],
                                   ppw,
                                   previous_approx),
               ppt->error_message,
               ppt->error_message);

    /** - --> (d) integrate the perturbations over the current interval. */

    /* select the generic integrator requested in the precision structure
       (explicit Runge-Kutta, or the stiff ndf15 solver) */
    if(ppr->evolver == rk){
      generic_evolver = evolver_rk;
    }
    else{
      generic_evolver = evolver_ndf15;
    }

    class_call(generic_evolver(perturb_derivs,
                               interval_limit[index_interval],
                               interval_limit[index_interval+1],
                               ppw->pv->y,
                               ppw->pv->used_in_sources,
                               ppw->pv->pt_size,
                               &ppaw,
                               ppr->tol_perturb_integration,
                               ppr->smallest_allowed_variation,
                               perturb_timescale,
                               ppr->perturb_integration_stepsize,
                               ppt->tau_sampling,
                               tau_actual_size,
                               perturb_sources,
                               perhaps_print_variables,
                               ppt->error_message),
               ppt->error_message,
               ppt->error_message);

  }

  /** - if perturbations were printed in a file, close the file */

  //if (perhaps_print_variables != NULL)
  //  fclose(ppw->perturb_output_file);

  /** - fill the source terms array with zeros for all times between the
      last integrated time tau_max and tau_today. */

  for (index_tau = tau_actual_size; index_tau < ppt->tau_size; index_tau++) {
    for (index_type = 0; index_type < ppt->tp_size[index_md]; index_type++) {
      ppt->sources[index_md]
        [index_ic * ppt->tp_size[index_md] + index_type]
        [index_tau * ppt->k_size[index_md] + index_k] = 0.;
    }
  }

  /** - free quantities allocated at the beginning of the routine */

  class_call(perturb_vector_free(ppw->pv),
             ppt->error_message,
             ppt->error_message);

  for (index_interval=0; index_interval<interval_number; index_interval++)
    free(interval_approx[index_interval]);

  free(interval_approx);

  free(interval_limit);

  return _SUCCESS_;
}

/**
 * Fill the column-title strings (one per mode: scalar/vector/tensor)
 * that label the perturbation output produced for the wavenumbers
 * requested in k_output_values. Titles are only written when at least
 * one output wavenumber was requested; otherwise the strings stay empty.
 *
 * @param pba Input: pointer to background structure (used for species flags)
 * @param ppt Input/Output: pointer to perturbation structure (title strings
 *            and title counts are written here)
 * @return the error status
 */

int perturb_prepare_output(struct background * pba,
                           struct perturbs * ppt){

  int n_ncdm;
  /* scratch buffer for per-species column names like "delta_ncdm[3]" */
  char tmp[40];

  /* start from empty title strings for every mode */
  ppt->scalar_titles[0]='\0';
  ppt->vector_titles[0]='\0';
  ppt->tensor_titles[0]='\0';

  if (ppt->k_output_values_num > 0) {

    /** Write titles for all perturbations that we would like to print/store. */
    if (ppt->has_scalars == _TRUE_){

      class_store_columntitle(ppt->scalar_titles,"tau [Mpc]",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"a",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"delta_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"theta_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"shear_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"pol0_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"pol1_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"pol2_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"delta_b",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"theta_b",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"psi",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"phi",_TRUE_);
      /* Perturbed recombination */
      class_store_columntitle(ppt->scalar_titles,"delta_Tb",ppt->has_perturbed_recombination);
      class_store_columntitle(ppt->scalar_titles,"delta_chi",ppt->has_perturbed_recombination);
      /* Ultrarelativistic species */
      class_store_columntitle(ppt->scalar_titles,"delta_ur",pba->has_ur);
      class_store_columntitle(ppt->scalar_titles,"theta_ur",pba->has_ur);
      class_store_columntitle(ppt->scalar_titles,"shear_ur",pba->has_ur);
      /* Cold dark matter */
      class_store_columntitle(ppt->scalar_titles,"delta_cdm",pba->has_cdm);
      class_store_columntitle(ppt->scalar_titles,"theta_cdm",pba->has_cdm);
      /* Non-cold dark matter: one column set per ncdm species */
      if ((pba->has_ncdm == _TRUE_) && ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_))) {
        for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
          sprintf(tmp,"delta_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
          sprintf(tmp,"theta_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
          sprintf(tmp,"shear_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
          sprintf(tmp,"cs2_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
        }
      }
      /* Decaying cold dark matter */
      class_store_columntitle(ppt->scalar_titles, "delta_dcdm", pba->has_dcdm);
      class_store_columntitle(ppt->scalar_titles, "theta_dcdm", pba->has_dcdm);
      /* Decay radiation */
      class_store_columntitle(ppt->scalar_titles, "delta_dr", pba->has_dr);
      class_store_columntitle(ppt->scalar_titles, "theta_dr", pba->has_dr);
      class_store_columntitle(ppt->scalar_titles, "shear_dr", pba->has_dr);
      /* Scalar field scf */
      class_store_columntitle(ppt->scalar_titles, "delta_scf", pba->has_scf);
      class_store_columntitle(ppt->scalar_titles, "theta_scf", pba->has_scf);

      ppt->number_of_scalar_titles = get_number_of_titles(ppt->scalar_titles);
    }

    if (ppt->has_tensors == _TRUE_){

      class_store_columntitle(ppt->tensor_titles,"tau [Mpc]",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"a",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"delta_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"shear_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"l4_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"pol0_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"pol2_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"pol4_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"H (gw)",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"Hdot (gwdot)",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"delta_ur",ppt->evolve_tensor_ur);
      class_store_columntitle(ppt->tensor_titles,"shear_ur",ppt->evolve_tensor_ur);
      class_store_columntitle(ppt->tensor_titles,"l4_ur",ppt->evolve_tensor_ur);
      if (ppt->evolve_tensor_ncdm == _TRUE_) {
        for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
          sprintf(tmp,"delta_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->tensor_titles,tmp,_TRUE_);
          sprintf(tmp,"theta_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->tensor_titles,tmp,_TRUE_);
          sprintf(tmp,"shear_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->tensor_titles,tmp,_TRUE_);
        }
      }
      ppt->number_of_tensor_titles = get_number_of_titles(ppt->tensor_titles);
    }

  }
  return _SUCCESS_;
}

/**
 *
 For a given mode and wavenumber, find the number of intervals of
 * time between tau_ini and tau_end such that the approximation
 * scheme (and the number of perturbation equations) is uniform.
 *
 * @param ppr                Input: pointer to precision structure
 * @param pba                Input: pointer to background structure
 * @param pth                Input: pointer to the thermodynamics structure
 * @param ppt                Input: pointer to the perturbation structure
 * @param index_md           Input: index of mode under consideration (scalar/.../tensor)
 * @param k                  Input: wavenumber value (assumed here to be the wavenumber itself, not an index — TODO confirm against callers)
 * @param ppw                Input: pointer to perturb_workspace structure containing index values and workspaces
 * @param tau_ini            Input: initial time of the perturbation integration
 * @param tau_end            Input: final time of the perturbation integration
 * @param interval_number    Output: total number of intervals
 * @param interval_number_of Output: number of intervals with respect to each particular approximation
 * @return the error status
 */

int perturb_find_approximation_number(
                                      struct precision * ppr,
                                      struct background * pba,
                                      struct thermo * pth,
                                      struct perturbs * ppt,
                                      int index_md,
                                      double k,
                                      struct perturb_workspace * ppw,
                                      double tau_ini,
                                      double tau_end,
                                      int * interval_number,
                                      int * interval_number_of /* interval_number_of[index_ap] (already allocated) */
                                      ){

  /** Summary: */

  /* index running over approximations */
  int index_ap;

  /* value of a given approximation at tau_ini and tau_end */
  int flag_ini,flag_end;

  /** - fix default number of intervals to one (if no approximation switch) */

  *interval_number=1;

  /** - loop over each approximation and add the number of approximation switching times */

  for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {

    /* evaluate all approximation flags at the initial time; only the
       entry for the current index_ap is read below */
    class_call(perturb_approximations(ppr,
                                      pba,
                                      pth,
                                      ppt,
                                      index_md,
                                      k,
                                      tau_ini,
                                      ppw),
               ppt->error_message,
               ppt->error_message);

    flag_ini = ppw->approx[index_ap];

    /* same evaluation at the final time */
    class_call(perturb_approximations(ppr,
                                      pba,
                                      pth,
                                      ppt,
                                      index_md,
                                      k,
                                      tau_end,
                                      ppw),
               ppt->error_message,
               ppt->error_message);

    flag_end = ppw->approx[index_ap];

    /* flags are assumed monotonically non-decreasing in time, so the
       number of switches equals the difference of the labels */
    class_test(flag_end<flag_ini,
               ppt->error_message,
               "For each approximation scheme, the declaration of approximation labels in the enumeration must follow chronological order, e.g: enum approx_flags {flag1, flag2, flag3} with flag1 being the initial one and flag3 the final one");

    *interval_number += flag_end-flag_ini;

    interval_number_of[index_ap] = flag_end-flag_ini+1;
  }

  return _SUCCESS_;

}

/**
 * For a given mode and wavenumber, find the values of time at which
 * the approximation changes.
 *
 * @param ppr                Input: pointer to precision structure
 * @param pba                Input: pointer to background structure
 * @param pth                Input: pointer to the thermodynamics structure
 * @param ppt                Input: pointer to the perturbation structure
 * @param index_md           Input: index of mode under consideration (scalar/.../tensor)
 * @param k                  Input: wavenumber value (assumed here to be the wavenumber itself, not an index — TODO confirm against callers)
 * @param ppw                Input: pointer to perturb_workspace structure containing index values and workspaces
 * @param tau_ini            Input: initial time of the perturbation integration
 * @param tau_end            Input: final time of the perturbation integration
 * @param precision          Input: tolerance on output values
 * @param interval_number    Input: total number of intervals
 * @param interval_number_of Input: number of intervals with respect to each particular approximation
 * @param interval_limit     Output: value of time at the boundary of the intervals: tau_ini, tau_switch1, ..., tau_end
 * @param interval_approx    Output: value of approximations in each interval
 * @return the error status
 */

int perturb_find_approximation_switches(
                                        struct precision * ppr,
                                        struct background * pba,
                                        struct thermo * pth,
                                        struct perturbs * ppt,
                                        int index_md,
                                        double k,
                                        struct perturb_workspace * ppw,
                                        double tau_ini,
                                        double tau_end,
                                        double precision,
                                        int interval_number,
                                        int * interval_number_of,
                                        double * interval_limit, /* interval_limit[index_interval] (already allocated) */
                                        int ** interval_approx   /* interval_approx[index_interval][index_ap] (already allocated) */
                                        ){

  /** Summary: */

  int index_ap;
  int
index_switch;
  int index_switch_tot;
  int num_switch;
  double tau_min,lower_bound,upper_bound;
  double mid=0;
  double * unsorted_tau_switch;
  double next_tau_switch;
  int flag_ini;
  int num_switching_at_given_time;

  /** - write in output arrays the initial time and approximation */

  interval_limit[0]=tau_ini;

  class_call(perturb_approximations(ppr,
                                    pba,
                                    pth,
                                    ppt,
                                    index_md,
                                    k,
                                    tau_ini,
                                    ppw),
             ppt->error_message,
             ppt->error_message);

  for (index_ap=0; index_ap<ppw->ap_size; index_ap++)
    interval_approx[0][index_ap]=ppw->approx[index_ap];

  /** - if there are no approximation switches, just write final time and return */

  if (interval_number == 1) {

    interval_limit[1]=tau_end;

  }

  /** - if there are switches, consider approximations one after each
      other. Find switching time by bisection. Store all switches in
      arbitrary order in array unsorted_tau_switch[ ] */

  else {

    class_alloc(unsorted_tau_switch,(interval_number-1)*sizeof(double),ppt->error_message);

    index_switch_tot=0;

    for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {

      if (interval_number_of[index_ap] > 1) {

        num_switch = interval_number_of[index_ap]-1;

        tau_min = tau_ini;

        flag_ini = interval_approx[0][index_ap];

        /* bisection for each successive switch of this approximation;
           invariant: the switch from label (flag_ini+index_switch) to
           the next one lies in (lower_bound, upper_bound] */
        for (index_switch=0; index_switch<num_switch; index_switch++) {

          lower_bound=tau_min;
          upper_bound=tau_end;
          mid = 0.5*(lower_bound+upper_bound);

          while (upper_bound - lower_bound > precision) {

            class_call(perturb_approximations(ppr,
                                              pba,
                                              pth,
                                              ppt,
                                              index_md,
                                              k,
                                              mid,
                                              ppw),
                       ppt->error_message,
                       ppt->error_message);

            if (ppw->approx[index_ap] > flag_ini+index_switch) {
              upper_bound=mid;
            }
            else {
              lower_bound=mid;
            }

            mid = 0.5*(lower_bound+upper_bound);

          }

          unsorted_tau_switch[index_switch_tot]=mid;
          index_switch_tot++;

          /* next switch of this approximation is later than this one */
          tau_min=mid;

        }
      }
    }

    class_test(index_switch_tot != (interval_number-1),
               ppt->error_message,
               "bug in approximation switch search routine: should have %d = %d",
               index_switch_tot,interval_number-1);

    /** - now sort interval limits in correct order */

    /* repeatedly pick, among the unsorted switching times, the smallest
       one strictly larger than the last stored limit (selection sort) */
    index_switch_tot=1;

    while (index_switch_tot < interval_number) {

      next_tau_switch=tau_end;
      for (index_switch=0; index_switch<interval_number-1; index_switch++) {
        if ((unsorted_tau_switch[index_switch] > interval_limit[index_switch_tot-1]) &&
            (unsorted_tau_switch[index_switch] < next_tau_switch)) {
          next_tau_switch=unsorted_tau_switch[index_switch];
        }
      }
      interval_limit[index_switch_tot]=next_tau_switch;
      index_switch_tot++;
    }

    interval_limit[index_switch_tot]=tau_end;

    class_test(index_switch_tot != interval_number,
               ppt->error_message,
               "most probably two approximation switching time were found to be equal, which cannot be handled\n");

    /** - store each approximation in chronological order */

    for (index_switch=1; index_switch<interval_number; index_switch++) {

      /* evaluate flags at the midpoint of each interval */
      class_call(perturb_approximations(ppr,
                                        pba,
                                        pth,
                                        ppt,
                                        index_md,
                                        k,
                                        0.5*(interval_limit[index_switch]+interval_limit[index_switch+1]),
                                        ppw),
                 ppt->error_message,
                 ppt->error_message);

      for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {
        interval_approx[index_switch][index_ap]=ppw->approx[index_ap];

        /* check here that approximation does not go backward (remember
           that by definition the value of an approximation can only
           increase) */
        class_test(interval_approx[index_switch][index_ap] < interval_approx[index_switch-1][index_ap],
                   ppt->error_message,
                   "The approximation with label %d is not defined correctly: it goes backward (from %d to %d) for k=%e and between tau=%e and %e; this cannot be handled\n",
                   index_ap,
                   interval_approx[index_switch-1][index_ap],
                   interval_approx[index_switch][index_ap],
                   k,
                   0.5*(interval_limit[index_switch-1]+interval_limit[index_switch]),
                   0.5*(interval_limit[index_switch]+interval_limit[index_switch+1])
                   );
      }

      /* check here that more than one approximation is not switched on
         at a given time */
      num_switching_at_given_time=0;
      for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {
        if (interval_approx[index_switch][index_ap] != interval_approx[index_switch-1][index_ap])
          num_switching_at_given_time++;
      }
      class_test(num_switching_at_given_time != 1,
                 ppt->error_message,
                 "for k=%e, at tau=%g, you switch %d approximations at the same time, this cannot be handled. Usually happens in two cases: triggers for different approximations coincide, or one approx is reversible\n",
                 k,
                 interval_limit[index_switch],
                 num_switching_at_given_time);

      /* optional diagnostics: report which approximation toggles at each
         switching time */
      if (ppt->perturbations_verbose>2) {

        if (_scalars_) {

          if ((interval_approx[index_switch-1][ppw->index_ap_tca]==(int)tca_on) &&
              (interval_approx[index_switch][ppw->index_ap_tca]==(int)tca_off))
            fprintf(stdout,"Mode k=%e: will switch off tight-coupling approximation at tau=%e\n",k,interval_limit[index_switch]);
          //fprintf(stderr,"Mode k=%e: will switch off tight-coupling approximation at tau=%e\n",k,interval_limit[index_switch]);
          //TBC

          if ((interval_approx[index_switch-1][ppw->index_ap_rsa]==(int)rsa_off) &&
              (interval_approx[index_switch][ppw->index_ap_rsa]==(int)rsa_on))
            fprintf(stdout,"Mode k=%e: will switch on radiation streaming approximation at tau=%e\n",k,interval_limit[index_switch]);

          if (pba->has_ur == _TRUE_) {
            if ((interval_approx[index_switch-1][ppw->index_ap_ufa]==(int)ufa_off) &&
                (interval_approx[index_switch][ppw->index_ap_ufa]==(int)ufa_on)) {
              fprintf(stdout,"Mode k=%e: will switch on ur fluid approximation at tau=%e\n",k,interval_limit[index_switch]);
            }
          }
          if (pba->has_ncdm == _TRUE_) {
            if ((interval_approx[index_switch-1][ppw->index_ap_ncdmfa]==(int)ncdmfa_off) &&
                (interval_approx[index_switch][ppw->index_ap_ncdmfa]==(int)ncdmfa_on)) {
              fprintf(stdout,"Mode k=%e: will switch on ncdm fluid approximation at tau=%e\n",k,interval_limit[index_switch]);
            }
          }
        }

        if (_tensors_) {

          if ((interval_approx[index_switch-1][ppw->index_ap_tca]==(int)tca_on) &&
              (interval_approx[index_switch][ppw->index_ap_tca]==(int)tca_off))
            fprintf(stdout,"Mode k=%e: will switch off tight-coupling approximation for tensors at tau=%e\n",k,interval_limit[index_switch]);

          if ((interval_approx[index_switch-1][ppw->index_ap_rsa]==(int)rsa_off) &&
              (interval_approx[index_switch][ppw->index_ap_rsa]==(int)rsa_on))
            fprintf(stdout,"Mode k=%e: will switch on radiation streaming approximation for tensors at tau=%e\n",k,interval_limit[index_switch]);
        }
      }
    }

    free(unsorted_tau_switch);

    /* leave the workspace consistent with the final time */
    class_call(perturb_approximations(ppr,
                                      pba,
                                      pth,
                                      ppt,
                                      index_md,
                                      k,
                                      tau_end,
                                      ppw),
               ppt->error_message,
               ppt->error_message);

  }

  return _SUCCESS_;
}

/**
 * Initialize the field '-->pv' of a perturb_workspace structure, which
 * is a perturb_vector structure. This structure contains indices and
 * values of all quantities which need to be integrated with respect
 * to time (and only them: quantities fixed analytically or obeying
 * constraint equations are NOT included in this vector). This routine
 * distinguishes between two cases:
 *
 * --> the input pa_old is set to the NULL pointer:
 *
 * This happens when we start integrating over a new wavenumber and we
 * want to set initial conditions for the perturbations. Then, it is
 * assumed that ppw-->pv is not yet allocated. This routine allocates
 * it, defines all indices, and then fills the vector ppw-->pv-->y with
 * the initial conditions defined in perturb_initial_conditions.
 *
 * --> the input pa_old is not set to the NULL pointer and describes
 * some set of approximations:
 *
 * This happens when we need to change approximation scheme while
 * integrating over a given wavenumber. The new approximation
 * described by ppw-->pa is then different from pa_old. Then, this
 * routine allocates a new vector with a new size and new index
 * values; it fills this vector with initial conditions taken from the
 * previous vector passed as an input in ppw-->pv, and eventually with
 * some analytic approximations for the new variables appearing at
 * this time; then the new vector comes in replacement of the old one,
 * which is freed.
* * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param pth Input: pointer to the thermodynamics structure * @param ppt Input: pointer to the perturbation structure * @param index_md Input: index of mode under consideration (scalar/.../tensor) * @param index_ic Input: index of initial condition under consideration (ad, iso...) * @param k Input: wavenumber * @param tau Input: conformal time * @param ppw Input/Output: workspace containing in input the approximation scheme, the background/thermodynamics/metric quantities, and eventually the previous vector y; and in output the new vector y. * @param pa_old Input: NULL is we need to set y to initial conditions for a new wavenumber; points towards a perturb_approximations if we want to switch of approximation. * @return the error status */ int perturb_vector_init( struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt, int index_md, int index_ic, double k, double tau, struct perturb_workspace * ppw, /* ppw->pv unallocated if pa_old = NULL, allocated and filled otherwise */ int * pa_old ) { /** Summary: */ /** - define local variables */ struct perturb_vector * ppv; int index_pt; int l; int n_ncdm,index_q,ncdm_l_size; double rho_plus_p_ncdm,q,q2,epsilon,a,factor; /** - allocate a new perturb_vector structure to which ppw-->pv will point at the end of the routine */ class_alloc(ppv,sizeof(struct perturb_vector),ppt->error_message); /** - initialize pointers to NULL (they will be allocated later if needed), relevant for perturb_vector_free() */ ppv->l_max_ncdm = NULL; ppv->q_size_ncdm = NULL; /** - define all indices in this new vector (depends on approximation scheme, described by the input structure ppw-->pa) */ index_pt = 0; if (_scalars_) { /* reject inconsistent values of the number of mutipoles in photon temperature hierarchy */ class_test(ppr->l_max_g < 4, ppt->error_message, "ppr->l_max_g should be at least 4, i.e. 
we must integrate at least over photon density, velocity, shear, third and fourth momentum"); /* reject inconsistent values of the number of mutipoles in photon polarization hierarchy */ class_test(ppr->l_max_pol_g < 4, ppt->error_message, "ppr->l_max_pol_g should be at least 4"); /* reject inconsistent values of the number of mutipoles in decay radiation hierarchy */ if (pba->has_dr == _TRUE_) { class_test(ppr->l_max_dr < 4, ppt->error_message, "ppr->l_max_dr should be at least 4, i.e. we must integrate at least over neutrino/relic density, velocity, shear, third and fourth momentum"); } /* reject inconsistent values of the number of mutipoles in ultra relativistic neutrino hierarchy */ if (pba->has_ur == _TRUE_) { class_test(ppr->l_max_ur < 4, ppt->error_message, "ppr->l_max_ur should be at least 4, i.e. we must integrate at least over neutrino/relic density, velocity, shear, third and fourth momentum"); } /* photons */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */ /* temperature */ ppv->l_max_g = ppr->l_max_g; class_define_index(ppv->index_pt_delta_g,_TRUE_,index_pt,1); /* photon density */ class_define_index(ppv->index_pt_theta_g,_TRUE_,index_pt,1); /* photon velocity */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { class_define_index(ppv->index_pt_shear_g,_TRUE_,index_pt,1); /* photon shear */ class_define_index(ppv->index_pt_l3_g,_TRUE_,index_pt,ppv->l_max_g-2); /* higher momenta */ /* polarization */ ppv->l_max_pol_g = ppr->l_max_pol_g; class_define_index(ppv->index_pt_pol0_g,_TRUE_,index_pt,1); class_define_index(ppv->index_pt_pol1_g,_TRUE_,index_pt,1); class_define_index(ppv->index_pt_pol2_g,_TRUE_,index_pt,1); class_define_index(ppv->index_pt_pol3_g,_TRUE_,index_pt,ppv->l_max_pol_g-2); } } /* baryons */ class_define_index(ppv->index_pt_delta_b,_TRUE_,index_pt,1); /* baryon density */ class_define_index(ppv->index_pt_theta_b,_TRUE_,index_pt,1); /* baryon velocity */ /* cdm */ 
class_define_index(ppv->index_pt_delta_cdm,pba->has_cdm,index_pt,1); /* cdm density */ class_define_index(ppv->index_pt_theta_cdm,pba->has_cdm && (ppt->gauge == newtonian),index_pt,1); /* cdm velocity */ /* dcdm */ class_define_index(ppv->index_pt_delta_dcdm,pba->has_dcdm,index_pt,1); /* dcdm density */ class_define_index(ppv->index_pt_theta_dcdm,pba->has_dcdm,index_pt,1); /* dcdm velocity */ /* ultra relativistic decay radiation */ if (pba->has_dr==_TRUE_){ ppv->l_max_dr = ppr->l_max_dr; class_define_index(ppv->index_pt_F0_dr,_TRUE_,index_pt,ppv->l_max_dr+1); /* all momenta in Boltzmann hierarchy */ } /* fluid */ class_define_index(ppv->index_pt_delta_fld,pba->has_fld,index_pt,1); /* fluid density */ class_define_index(ppv->index_pt_theta_fld,pba->has_fld,index_pt,1); /* fluid velocity */ /* scalar field */ class_define_index(ppv->index_pt_phi_scf,pba->has_scf,index_pt,1); /* scalar field density */ class_define_index(ppv->index_pt_phi_prime_scf,pba->has_scf,index_pt,1); /* scalar field velocity */ /* perturbed recombination: the indices are defined once tca is off. 
*/ if ( (ppt->has_perturbed_recombination == _TRUE_) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off) ){ class_define_index(ppv->index_pt_perturbed_recombination_delta_temp,_TRUE_,index_pt,1); class_define_index(ppv->index_pt_perturbed_recombination_delta_chi,_TRUE_,index_pt,1); } /* ultra relativistic neutrinos */ if (pba->has_ur && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) { class_define_index(ppv->index_pt_delta_ur,_TRUE_,index_pt,1); /* density of ultra-relativistic neutrinos/relics */ class_define_index(ppv->index_pt_theta_ur,_TRUE_,index_pt,1); /* velocity of ultra-relativistic neutrinos/relics */ class_define_index(ppv->index_pt_shear_ur,_TRUE_,index_pt,1); /* shear of ultra-relativistic neutrinos/relics */ if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) { ppv->l_max_ur = ppr->l_max_ur; class_define_index(ppv->index_pt_l3_ur,_TRUE_,index_pt,ppv->l_max_ur-2); /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3) */ } } /* non-cold dark matter */ if (pba->has_ncdm == _TRUE_) { ppv->index_pt_psi0_ncdm1 = index_pt; /* density of ultra-relativistic neutrinos/relics */ ppv->N_ncdm = pba->N_ncdm; class_alloc(ppv->l_max_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message); class_alloc(ppv->q_size_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message); for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){ // Set value of ppv->l_max_ncdm: if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_off){ /* reject inconsistent values of the number of mutipoles in ultra relativistic neutrino hierarchy */ class_test(ppr->l_max_ncdm < 4, ppt->error_message, "ppr->l_max_ncdm=%d should be at least 4, i.e. 
we must integrate at least over first four momenta of non-cold dark matter perturbed phase-space distribution",n_ncdm); //Copy value from precision parameter: ppv->l_max_ncdm[n_ncdm] = ppr->l_max_ncdm; ppv->q_size_ncdm[n_ncdm] = pba->q_size_ncdm[n_ncdm]; } else{ // In the fluid approximation, hierarchy is cut at lmax = 2 and q dependence is integrated out: ppv->l_max_ncdm[n_ncdm] = 2; ppv->q_size_ncdm[n_ncdm] = 1; } index_pt += (ppv->l_max_ncdm[n_ncdm]+1)*ppv->q_size_ncdm[n_ncdm]; } } /* metric (only quantities to be integrated, not those obeying constraint equations) */ /* metric perturbation eta of synchronous gauge */ class_define_index(ppv->index_pt_eta,ppt->gauge == synchronous,index_pt,1); /* metric perturbation phi of newtonian gauge ( we could fix it using Einstein equations as a constraint equation for phi, but integration is numerically more stable if we actually evolve phi) */ class_define_index(ppv->index_pt_phi,ppt->gauge == newtonian,index_pt,1); } if (_vectors_) { /* Vector baryon velocity: v_b^{(1)}. 
*/ class_define_index(ppv->index_pt_theta_b,_TRUE_,index_pt,1); /* eventually reject inconsistent values of the number of mutipoles in photon temperature hierarchy and polarization*/ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */ ppv->l_max_g = ppr->l_max_g_ten; class_define_index(ppv->index_pt_delta_g,_TRUE_,index_pt,1); /* photon density */ class_define_index(ppv->index_pt_theta_g,_TRUE_,index_pt,1); /* photon velocity */ class_define_index(ppv->index_pt_shear_g,_TRUE_,index_pt,1); /* photon shear */ class_define_index(ppv->index_pt_l3_g,_TRUE_,index_pt,ppv->l_max_g-2); /* photon l=3 */ ppv->l_max_pol_g = ppr->l_max_pol_g_ten; class_define_index(ppv->index_pt_pol0_g,_TRUE_,index_pt,1); /* photon polarization, l=0 */ class_define_index(ppv->index_pt_pol1_g,_TRUE_,index_pt,1); /* photon polarization, l=1 */ class_define_index(ppv->index_pt_pol2_g,_TRUE_,index_pt,1); /* photon polarization, l=2 */ class_define_index(ppv->index_pt_pol3_g,_TRUE_,index_pt,ppv->l_max_pol_g-2); /* photon polarization, l=3 */ } } /** - (a) metric perturbations V or \f$ h_v \f$ depending on gauge */ if (ppt->gauge == synchronous){ class_define_index(ppv->index_pt_hv_prime,_TRUE_,index_pt,1); } if (ppt->gauge == newtonian){ class_define_index(ppv->index_pt_V,_TRUE_,index_pt,1); } } if (_tensors_) { /* reject inconsistent values of the number of mutipoles in photon temperature hierarchy */ class_test(ppr->l_max_g_ten < 4, ppt->error_message, "ppr->l_max_g_ten should be at least 4, i.e. 
we must integrate at least over photon density, velocity, shear, third momentum"); /* reject inconsistent values of the number of mutipoles in photon polarization hierarchy */ class_test(ppr->l_max_pol_g_ten < 4, ppt->error_message, "ppr->l_max_pol_g_ten should be at least 4"); if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */ ppv->l_max_g = ppr->l_max_g_ten; class_define_index(ppv->index_pt_delta_g,_TRUE_,index_pt,1); /* photon density */ class_define_index(ppv->index_pt_theta_g,_TRUE_,index_pt,1); /* photon velocity */ class_define_index(ppv->index_pt_shear_g,_TRUE_,index_pt,1); /* photon shear */ class_define_index(ppv->index_pt_l3_g,_TRUE_,index_pt,ppv->l_max_g-2); /* photon l=3 */ ppv->l_max_pol_g = ppr->l_max_pol_g_ten; class_define_index(ppv->index_pt_pol0_g,_TRUE_,index_pt,1); /* photon polarization, l=0 */ class_define_index(ppv->index_pt_pol1_g,_TRUE_,index_pt,1); /* photon polarization, l=1 */ class_define_index(ppv->index_pt_pol2_g,_TRUE_,index_pt,1); /* photon polarization, l=2 */ class_define_index(ppv->index_pt_pol3_g,_TRUE_,index_pt,ppv->l_max_pol_g-2); /* photon polarization, l=3 */ } } /* ultra relativistic neutrinos */ class_define_index(ppv->index_pt_delta_ur,ppt->evolve_tensor_ur,index_pt,1); /* ur density */ class_define_index(ppv->index_pt_theta_ur,ppt->evolve_tensor_ur,index_pt,1); /* ur velocity */ class_define_index(ppv->index_pt_shear_ur,ppt->evolve_tensor_ur,index_pt,1); /* ur shear */ ppv->l_max_ur = ppr->l_max_ur; class_define_index(ppv->index_pt_l3_ur,ppt->evolve_tensor_ur,index_pt,ppv->l_max_ur-2); /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3) */ if (ppt->evolve_tensor_ncdm == _TRUE_) { ppv->index_pt_psi0_ncdm1 = index_pt; ppv->N_ncdm = pba->N_ncdm; class_alloc(ppv->l_max_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message); 
class_alloc(ppv->q_size_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message); for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){ // Set value of ppv->l_max_ncdm: class_test(ppr->l_max_ncdm < 4, ppt->error_message, "ppr->l_max_ncdm=%d should be at least 4, i.e. we must integrate at least over first four momenta of non-cold dark matter perturbed phase-space distribution",n_ncdm); //Copy value from precision parameter: ppv->l_max_ncdm[n_ncdm] = ppr->l_max_ncdm; ppv->q_size_ncdm[n_ncdm] = pba->q_size_ncdm[n_ncdm]; index_pt += (ppv->l_max_ncdm[n_ncdm]+1)*ppv->q_size_ncdm[n_ncdm]; } } /** - (b) metric perturbation h is a propagating degree of freedom, so h and hdot are included in the vector of ordinary perturbations, no in that of metric perturbations */ class_define_index(ppv->index_pt_gw,_TRUE_,index_pt,1); /* tensor metric perturbation h (gravitational waves) */ class_define_index(ppv->index_pt_gwdot,_TRUE_,index_pt,1); /* its time-derivative */ } ppv->pt_size = index_pt; /** - allocate vectors for storing the values of all these quantities and their time-derivatives at a given time */ class_calloc(ppv->y,ppv->pt_size,sizeof(double),ppt->error_message); class_alloc(ppv->dy,ppv->pt_size*sizeof(double),ppt->error_message); class_alloc(ppv->used_in_sources,ppv->pt_size*sizeof(int),ppt->error_message); /** - specify which perturbations are needed in the evaluation of source terms */ /* take all of them by default */ for (index_pt=0; index_pt<ppv->pt_size; index_pt++) ppv->used_in_sources[index_pt] = _TRUE_; /* indicate which ones are not needed (this is just for saving time, omitting perturbations in this list will not change the results!) 
*/ if (_scalars_) { if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* we don't need temperature multipoles above l=2 (but they are defined only when rsa and tca are off) */ for (index_pt=ppv->index_pt_l3_g; index_pt <= ppv->index_pt_delta_g+ppv->l_max_g; index_pt++) ppv->used_in_sources[index_pt]=_FALSE_; /* for polarization, we only need l=0,2 (but l =1,3, ... are defined only when rsa and tca are off) */ ppv->used_in_sources[ppv->index_pt_pol1_g]=_FALSE_; for (index_pt=ppv->index_pt_pol3_g; index_pt <= ppv->index_pt_pol0_g+ppv->l_max_pol_g; index_pt++) ppv->used_in_sources[index_pt]=_FALSE_; } } if (pba->has_ur == _TRUE_) { if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) { /* we don't need ur multipoles above l=2 (but they are defined only when rsa and ufa are off) */ for (index_pt=ppv->index_pt_l3_ur; index_pt <= ppv->index_pt_delta_ur+ppv->l_max_ur; index_pt++) ppv->used_in_sources[index_pt]=_FALSE_; } } } if (pba->has_ncdm == _TRUE_) { /* we don't need ncdm multipoles above l=2 (but they are defined only when ncdmfa is off) */ index_pt = ppv->index_pt_psi0_ncdm1; for(n_ncdm = 0; n_ncdm < ppv-> N_ncdm; n_ncdm++){ for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){ for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){ if (l>2) ppv->used_in_sources[index_pt]=_FALSE_; index_pt++; } } } } } if (_tensors_) { if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* we don't need temperature multipoles above except l=0,2,4 */ ppv->used_in_sources[ppv->index_pt_theta_g]=_FALSE_; ppv->used_in_sources[ppv->index_pt_l3_g]=_FALSE_; for (index_pt=ppv->index_pt_delta_g+5; index_pt <= ppv->index_pt_delta_g+ppv->l_max_g; index_pt++) ppv->used_in_sources[index_pt]=_FALSE_; /* same for polarization, we only need l=0,2,4 */ 
ppv->used_in_sources[ppv->index_pt_pol1_g]=_FALSE_; ppv->used_in_sources[ppv->index_pt_pol3_g]=_FALSE_; for (index_pt=ppv->index_pt_pol0_g+5; index_pt <= ppv->index_pt_pol0_g+ppv->l_max_pol_g; index_pt++) ppv->used_in_sources[index_pt]=_FALSE_; } } /* we need h' but not h */ ppv->used_in_sources[ppv->index_pt_gw]=_FALSE_; } /** - case of setting initial conditions for a new wavenumber */ if (pa_old == NULL) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: initializing vector at tau=%e\n",k,tau); if (_scalars_) { /** - --> (a) check that current approximation scheme is consistent with initial conditions */ class_test(ppw->approx[ppw->index_ap_rsa] == (int)rsa_on, ppt->error_message, "scalar initial conditions assume radiation streaming approximation turned off"); if (pba->has_ur == _TRUE_) { class_test(ppw->approx[ppw->index_ap_ufa] == (int)ufa_on, ppt->error_message, "scalar initial conditions assume ur fluid approximation turned off"); } if (pba->has_ncdm == _TRUE_) { class_test(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on, ppt->error_message, "scalar initial conditions assume ncdm fluid approximation turned off"); } class_test(ppw->approx[ppw->index_ap_tca] == (int)tca_off, ppt->error_message, "scalar initial conditions assume tight-coupling approximation turned on"); } if (_tensors_) { class_test(ppw->approx[ppw->index_ap_tca] == (int)tca_off, ppt->error_message, "tensor initial conditions assume tight-coupling approximation turned on"); class_test(ppw->approx[ppw->index_ap_rsa] == (int)rsa_on, ppt->error_message, "tensor initial conditions assume radiation streaming approximation turned off"); } /** - --> (b) let ppw-->pv points towards the perturb_vector structure that we just created */ ppw->pv = ppv; /** - --> (c) fill the vector ppw-->pv-->y with appropriate initial conditions */ class_call(perturb_initial_conditions(ppr, pba, ppt, index_md, index_ic, k, tau, ppw), ppt->error_message, ppt->error_message); } /** - case of switching 
approximation while a wavenumber is being integrated */ else { /** - --> (a) for the scalar mode: */ if (_scalars_) { /** - ---> (a.1.) check that the change of approximation scheme makes sense (note: before calling this routine there is already a check that we wish to change only one approximation flag at a time) */ class_test((pa_old[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_tca] == (int)tca_on), ppt->error_message, "at tau=%g: the tight-coupling approximation can be switched off, not on",tau); /** - ---> (a.2.) some variables (b, cdm, fld, ...) are not affected by any approximation. They need to be reconducted whatever the approximation switching is. We treat them here. Below we will treat other variables case by case. */ ppv->y[ppv->index_pt_delta_b] = ppw->pv->y[ppw->pv->index_pt_delta_b]; ppv->y[ppv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_b]; if (pba->has_cdm == _TRUE_) { ppv->y[ppv->index_pt_delta_cdm] = ppw->pv->y[ppw->pv->index_pt_delta_cdm]; if (ppt->gauge == newtonian) { ppv->y[ppv->index_pt_theta_cdm] = ppw->pv->y[ppw->pv->index_pt_theta_cdm]; } } if (pba->has_dcdm == _TRUE_) { ppv->y[ppv->index_pt_delta_dcdm] = ppw->pv->y[ppw->pv->index_pt_delta_dcdm]; ppv->y[ppv->index_pt_theta_dcdm] = ppw->pv->y[ppw->pv->index_pt_theta_dcdm]; } if (pba->has_dr == _TRUE_){ for (l=0; l <= ppv->l_max_dr; l++) ppv->y[ppv->index_pt_F0_dr+l] = ppw->pv->y[ppw->pv->index_pt_F0_dr+l]; } if (pba->has_fld == _TRUE_) { ppv->y[ppv->index_pt_delta_fld] = ppw->pv->y[ppw->pv->index_pt_delta_fld]; ppv->y[ppv->index_pt_theta_fld] = ppw->pv->y[ppw->pv->index_pt_theta_fld]; } if (pba->has_scf == _TRUE_) { ppv->y[ppv->index_pt_phi_scf] = ppw->pv->y[ppw->pv->index_pt_phi_scf]; ppv->y[ppv->index_pt_phi_prime_scf] = ppw->pv->y[ppw->pv->index_pt_phi_prime_scf]; } if (ppt->gauge == synchronous) ppv->y[ppv->index_pt_eta] = ppw->pv->y[ppw->pv->index_pt_eta]; if (ppt->gauge == newtonian) ppv->y[ppv->index_pt_phi] = ppw->pv->y[ppw->pv->index_pt_phi]; /* -- 
case of switching off tight coupling approximation. Provide correct initial conditions to new set of variables */ if ((pa_old[ppw->index_ap_tca] == (int)tca_on) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: switch off tight-coupling approximation at tau=%e\n",k,tau); ppv->y[ppv->index_pt_delta_g] = ppw->pv->y[ppw->pv->index_pt_delta_g]; ppv->y[ppv->index_pt_theta_g] = ppw->pv->y[ppw->pv->index_pt_theta_g]; /* tight-coupling approximation for shear_g (previously computed in perturb_derivs: perturb_derivs is always called at the end of generic_evolver, in order to update all quantities in ppw to the time at which the approximation is switched off) */ ppv->y[ppv->index_pt_shear_g] = ppw->tca_shear_g; ppv->y[ppv->index_pt_l3_g] = 6./7.*k/ppw->pvecthermo[pth->index_th_dkappa]*ppw->s_l[3]*ppv->y[ppv->index_pt_shear_g]; /* second-order tight-coupling approximation for l=3 */ ppv->y[ppv->index_pt_pol0_g] = 2.5*ppv->y[ppv->index_pt_shear_g]; /* first-order tight-coupling approximation for polarization, l=0 */ ppv->y[ppv->index_pt_pol1_g] = k/ppw->pvecthermo[pth->index_th_dkappa]*(5.-2.*ppw->s_l[2])/6.*ppv->y[ppv->index_pt_shear_g]; /* second-order tight-coupling approximation for polarization, l=1 */ ppv->y[ppv->index_pt_pol2_g] = 0.5*ppv->y[ppv->index_pt_shear_g]; /* first-order tight-coupling approximation for polarization, l=2 */ ppv->y[ppv->index_pt_pol3_g] = k/ppw->pvecthermo[pth->index_th_dkappa]*3.*ppw->s_l[3]/14.*ppv->y[ppv->index_pt_shear_g]; /* second-order tight-coupling approximation for polarization, l=3 */ if (pba->has_ur == _TRUE_) { ppv->y[ppv->index_pt_delta_ur] = ppw->pv->y[ppw->pv->index_pt_delta_ur]; ppv->y[ppv->index_pt_theta_ur] = ppw->pv->y[ppw->pv->index_pt_theta_ur]; ppv->y[ppv->index_pt_shear_ur] = ppw->pv->y[ppw->pv->index_pt_shear_ur]; if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) { ppv->y[ppv->index_pt_l3_ur] = ppw->pv->y[ppw->pv->index_pt_l3_ur]; for (l=4; l <= 
ppv->l_max_ur; l++) ppv->y[ppv->index_pt_delta_ur+l] = ppw->pv->y[ppw->pv->index_pt_delta_ur+l]; } } if (pba->has_ncdm == _TRUE_) { index_pt = 0; for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){ for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){ for(l=0; l<=ppv->l_max_ncdm[n_ncdm];l++){ // This is correct with or without ncdmfa, since ppv->lmax_ncdm is set accordingly. ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] = ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt]; index_pt++; } } } } /* perturbed recombination */ /* the initial conditions are set when tca is switched off (current block) */ if (ppt->has_perturbed_recombination == _TRUE_){ ppv->y[ppv->index_pt_perturbed_recombination_delta_temp] = 1./3.*ppv->y[ppw->pv->index_pt_delta_b]; ppv->y[ppv->index_pt_perturbed_recombination_delta_chi] =0.; } } // end of block tca ON -> tca OFF /* perturbed recombination */ /* For any other transition in the approximation scheme, we should just copy the value of the perturbations, provided tca is already off (otherwise the indices are not yet allocated). For instance, we do not want to copy the values in the (k,tau) region where both UFA and TCA are engaged.*/ if ((ppt->has_perturbed_recombination == _TRUE_)&&(pa_old[ppw->index_ap_tca]==(int)tca_off)){ ppv->y[ppv->index_pt_perturbed_recombination_delta_temp] = ppw->pv->y[ppw->pv->index_pt_perturbed_recombination_delta_temp]; ppv->y[ppv->index_pt_perturbed_recombination_delta_chi] = ppw->pv->y[ppw->pv->index_pt_perturbed_recombination_delta_chi]; } /* -- case of switching on radiation streaming approximation. 
Provide correct initial conditions to new set of variables */ if ((pa_old[ppw->index_ap_rsa] == (int)rsa_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: switch on radiation streaming approximation at tau=%e with Omega_r=%g\n",k,tau,ppw->pvecback[pba->index_bg_Omega_r]); if (pba->has_ncdm == _TRUE_) { index_pt = 0; for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){ for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){ for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){ ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] = ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt]; index_pt++; } } } } } /* -- case of switching on ur fluid approximation. Provide correct initial conditions to new set of variables */ if (pba->has_ur == _TRUE_) { if ((pa_old[ppw->index_ap_ufa] == (int)ufa_off) && (ppw->approx[ppw->index_ap_ufa] == (int)ufa_on)) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: switch on ur fluid approximation at tau=%e\n",k,tau); if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { ppv->y[ppv->index_pt_delta_g] = ppw->pv->y[ppw->pv->index_pt_delta_g]; ppv->y[ppv->index_pt_theta_g] = ppw->pv->y[ppw->pv->index_pt_theta_g]; } if ((ppw->approx[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) { ppv->y[ppv->index_pt_shear_g] = ppw->pv->y[ppw->pv->index_pt_shear_g]; ppv->y[ppv->index_pt_l3_g] = ppw->pv->y[ppw->pv->index_pt_l3_g]; for (l = 4; l <= ppw->pv->l_max_g; l++) { ppv->y[ppv->index_pt_delta_g+l] = ppw->pv->y[ppw->pv->index_pt_delta_g+l]; } ppv->y[ppv->index_pt_pol0_g] = ppw->pv->y[ppw->pv->index_pt_pol0_g]; ppv->y[ppv->index_pt_pol1_g] = ppw->pv->y[ppw->pv->index_pt_pol1_g]; ppv->y[ppv->index_pt_pol2_g] = ppw->pv->y[ppw->pv->index_pt_pol2_g]; ppv->y[ppv->index_pt_pol3_g] = ppw->pv->y[ppw->pv->index_pt_pol3_g]; for (l = 4; l <= ppw->pv->l_max_pol_g; l++) { ppv->y[ppv->index_pt_pol0_g+l] = ppw->pv->y[ppw->pv->index_pt_pol0_g+l]; } } if 
(ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { ppv->y[ppv->index_pt_delta_ur] = ppw->pv->y[ppw->pv->index_pt_delta_ur]; ppv->y[ppv->index_pt_theta_ur] = ppw->pv->y[ppw->pv->index_pt_theta_ur]; ppv->y[ppv->index_pt_shear_ur] = ppw->pv->y[ppw->pv->index_pt_shear_ur]; } if (pba->has_ncdm == _TRUE_) { index_pt = 0; for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){ for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){ for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){ /* This is correct even when ncdmfa == off, since ppv->l_max_ncdm and ppv->q_size_ncdm is updated.*/ ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] = ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt]; index_pt++; } } } } } } /* -- case of switching on ncdm fluid approximation. Provide correct initial conditions to new set of variables */ if (pba->has_ncdm == _TRUE_) { if ((pa_old[ppw->index_ap_ncdmfa] == (int)ncdmfa_off) && (ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on)) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: switch on ncdm fluid approximation at tau=%e\n",k,tau); if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { ppv->y[ppv->index_pt_delta_g] = ppw->pv->y[ppw->pv->index_pt_delta_g]; ppv->y[ppv->index_pt_theta_g] = ppw->pv->y[ppw->pv->index_pt_theta_g]; } if ((ppw->approx[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) { ppv->y[ppv->index_pt_shear_g] = ppw->pv->y[ppw->pv->index_pt_shear_g]; ppv->y[ppv->index_pt_l3_g] = ppw->pv->y[ppw->pv->index_pt_l3_g]; for (l = 4; l <= ppw->pv->l_max_g; l++) { ppv->y[ppv->index_pt_delta_g+l] = ppw->pv->y[ppw->pv->index_pt_delta_g+l]; } ppv->y[ppv->index_pt_pol0_g] = ppw->pv->y[ppw->pv->index_pt_pol0_g]; ppv->y[ppv->index_pt_pol1_g] = ppw->pv->y[ppw->pv->index_pt_pol1_g]; ppv->y[ppv->index_pt_pol2_g] = ppw->pv->y[ppw->pv->index_pt_pol2_g]; ppv->y[ppv->index_pt_pol3_g] = ppw->pv->y[ppw->pv->index_pt_pol3_g]; for (l = 4; l <= ppw->pv->l_max_pol_g; l++) { ppv->y[ppv->index_pt_pol0_g+l] = 
ppw->pv->y[ppw->pv->index_pt_pol0_g+l]; } } if (pba->has_ur == _TRUE_) { if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { ppv->y[ppv->index_pt_delta_ur] = ppw->pv->y[ppw->pv->index_pt_delta_ur]; ppv->y[ppv->index_pt_theta_ur] = ppw->pv->y[ppw->pv->index_pt_theta_ur]; ppv->y[ppv->index_pt_shear_ur] = ppw->pv->y[ppw->pv->index_pt_shear_ur]; if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) { ppv->y[ppv->index_pt_l3_ur] = ppw->pv->y[ppw->pv->index_pt_l3_ur]; for (l=4; l <= ppv->l_max_ur; l++) ppv->y[ppv->index_pt_delta_ur+l] = ppw->pv->y[ppw->pv->index_pt_delta_ur+l]; } } } a = ppw->pvecback[pba->index_bg_a]; index_pt = ppw->pv->index_pt_psi0_ncdm1; for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){ // We are in the fluid approximation, so ncdm_l_size is always 3. ncdm_l_size = ppv->l_max_ncdm[n_ncdm]+1; rho_plus_p_ncdm = ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]; for(l=0; l<=2; l++){ ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+l] = 0.0; } factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4); for(index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q++){ // Integrate over distributions: q = pba->q_ncdm[n_ncdm][index_q]; q2 = q*q; epsilon = sqrt(q2+a*a*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]); ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm] += pba->w_ncdm[n_ncdm][index_q]*q2*epsilon* ppw->pv->y[index_pt]; ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+1] += pba->w_ncdm[n_ncdm][index_q]*q2*q* ppw->pv->y[index_pt+1]; ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+2] += pba->w_ncdm[n_ncdm][index_q]*q2*q2/epsilon* ppw->pv->y[index_pt+2]; //Jump to next momentum bin in ppw->pv->y: index_pt += (ppw->pv->l_max_ncdm[n_ncdm]+1); } ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm] *=factor/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+1] *=k*factor/rho_plus_p_ncdm; ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+2] 
*=2.0/3.0*factor/rho_plus_p_ncdm; } } } } /** - --> (b) for the vector mode */ if (_vectors_) { /** - ---> (b.1.) check that the change of approximation scheme makes sense (note: before calling this routine there is already a check that we wish to change only one approximation flag at a time) */ class_test((pa_old[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_tca] == (int)tca_on), ppt->error_message, "at tau=%g: the tight-coupling approximation can be switched off, not on",tau); /** - ---> (b.2.) some variables (gw, gwdot, ...) are not affected by any approximation. They need to be reconducted whatever the approximation switching is. We treat them here. Below we will treat other variables case by case. */ if (ppt->gauge == synchronous){ ppv->y[ppv->index_pt_hv_prime] = ppw->pv->y[ppw->pv->index_pt_hv_prime]; } if (ppt->gauge == newtonian){ ppv->y[ppv->index_pt_V] = ppw->pv->y[ppw->pv->index_pt_V]; } ppv->y[ppv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_b]; /* -- case of switching off tight coupling approximation. Provide correct initial conditions to new set of variables */ if ((pa_old[ppw->index_ap_tca] == (int)tca_on) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: switch off tight-coupling approximation at tau=%e\n",k,tau); ppv->y[ppv->index_pt_delta_g] = 0.0; //TBC //-4./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; ppv->y[ppv->index_pt_pol0_g] = 0.0; //TBC //1./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; } /* -- case of switching on radiation streaming approximation. 
Provide correct initial conditions to new set of variables */ if ((pa_old[ppw->index_ap_rsa] == (int)rsa_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: switch on radiation streaming approximation at tau=%e with Omega_r=%g\n",k,tau,ppw->pvecback[pba->index_bg_Omega_r]); } } /** - --> (c) for the tensor mode */ if (_tensors_) { /** - ---> (c.1.) check that the change of approximation scheme makes sense (note: before calling this routine there is already a check that we wish to change only one approximation flag at a time) */ class_test((pa_old[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_tca] == (int)tca_on), ppt->error_message, "at tau=%g: the tight-coupling approximation can be switched off, not on",tau); /** - ---> (c.2.) some variables (gw, gwdot, ...) are not affected by any approximation. They need to be reconducted whatever the approximation switching is. We treat them here. Below we will treat other variables case by case. */ ppv->y[ppv->index_pt_gw] = ppw->pv->y[ppw->pv->index_pt_gw]; ppv->y[ppv->index_pt_gwdot] = ppw->pv->y[ppw->pv->index_pt_gwdot]; if (ppt->evolve_tensor_ur == _TRUE_){ /* For now, neutrinos go here. */ ppv->y[ppv->index_pt_delta_ur] = ppw->pv->y[ppw->pv->index_pt_delta_ur]; ppv->y[ppv->index_pt_theta_ur] = ppw->pv->y[ppw->pv->index_pt_theta_ur]; ppv->y[ppv->index_pt_shear_ur] = ppw->pv->y[ppw->pv->index_pt_shear_ur]; ppv->y[ppv->index_pt_l3_ur] = ppw->pv->y[ppw->pv->index_pt_l3_ur]; for (l=4; l <= ppv->l_max_ur; l++) ppv->y[ppv->index_pt_delta_ur+l] = ppw->pv->y[ppw->pv->index_pt_delta_ur+l]; } if (ppt->evolve_tensor_ncdm == _TRUE_){ index_pt = 0; for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){ for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){ for(l=0; l<=ppv->l_max_ncdm[n_ncdm];l++){ // This is correct with or without ncdmfa, since ppv->lmax_ncdm is set accordingly. 
ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] = ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt]; index_pt++; } } } } /* -- case of switching off tight coupling approximation. Provide correct initial conditions to new set of variables */ if ((pa_old[ppw->index_ap_tca] == (int)tca_on) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: switch off tight-coupling approximation at tau=%e\n",k,tau); ppv->y[ppv->index_pt_delta_g] = -4./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; ppv->y[ppv->index_pt_pol0_g] = 1./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; } /* -- case of switching on radiation streaming approximation. Provide correct initial conditions to new set of variables */ if ((pa_old[ppw->index_ap_rsa] == (int)rsa_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: switch on radiation streaming approximation at tau=%e with Omega_r=%g\n",k,tau,ppw->pvecback[pba->index_bg_Omega_r]); } } /** - --> (d) free the previous vector of perturbations */ class_call(perturb_vector_free(ppw->pv), ppt->error_message, ppt->error_message); /** - --> (e) let ppw-->pv points towards the perturb_vector structure that we just created */ ppw->pv = ppv; } return _SUCCESS_; } /** * Free the perturb_vector structure. * * @param pv Input: pointer to perturb_vector structure to be freed * @return the error status */ int perturb_vector_free( struct perturb_vector * pv ) { if (pv->l_max_ncdm != NULL) free(pv->l_max_ncdm); if (pv->q_size_ncdm != NULL) free(pv->q_size_ncdm); free(pv->y); free(pv->dy); free(pv->used_in_sources); free(pv); return _SUCCESS_; } /** * For each mode, wavenumber and initial condition, this function * initializes in the vector all values of perturbed variables (in a * given gauge). 
It is assumed here that all values have previously been * set to zero, only non-zero values are set here. * * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param ppt Input: pointer to the perturbation structure * @param index_md Input: index of mode under consideration (scalar/.../tensor) * @param index_ic Input: index of initial condition under consideration (ad, iso...) * @param k Input: wavenumber * @param tau Input: conformal time * @param ppw Input/Output: workspace containing in input the approximation scheme, the background/thermodynamics/metric quantities, and eventually the previous vector y; and in output the new vector y. * @return the error status */ int perturb_initial_conditions(struct precision * ppr, struct background * pba, struct perturbs * ppt, int index_md, int index_ic, double k, double tau, struct perturb_workspace * ppw ) { /** Summary: */ /** --> Declare local variables */ double a,a_prime_over_a; double delta_ur=0.,theta_ur=0.,shear_ur=0.,l3_ur=0.,eta=0.,delta_cdm=0.,alpha, alpha_prime; double delta_dr=0; double q,epsilon,k2; int index_q,n_ncdm,idx; double rho_r,rho_m,rho_nu,rho_m_over_rho_r; double fracnu,fracg,fracb,fraccdm,om; double ktau_two,ktau_three; double f_dr; double delta_tot; double velocity_tot; double s2_squared; /** --> For scalars */ if (_scalars_) { /** - (a) compute relevant background quantities: compute rho_r, rho_m, rho_nu (= all relativistic except photons), and their ratio. 
*/ class_call(background_at_tau(pba, tau, pba->normal_info, pba->inter_normal, &(ppw->last_index_back), ppw->pvecback), pba->error_message, ppt->error_message); a = ppw->pvecback[pba->index_bg_a]; a_prime_over_a = ppw->pvecback[pba->index_bg_H]*a; /* 8piG/3 rho_r(t_i) */ rho_r = ppw->pvecback[pba->index_bg_rho_g]; /* 8piG/3 rho_m(t_i) */ rho_m = ppw->pvecback[pba->index_bg_rho_b]; /* 8piG/3 rho_nu(t_i) (all neutrinos and collisionless relics being relativistic at that time) */ rho_nu = 0.; if (pba->has_cdm == _TRUE_) { rho_m += ppw->pvecback[pba->index_bg_rho_cdm]; } if (pba->has_dcdm == _TRUE_) { rho_m += ppw->pvecback[pba->index_bg_rho_dcdm]; } if (pba->has_dr == _TRUE_) { rho_r += ppw->pvecback[pba->index_bg_rho_dr]; rho_nu += ppw->pvecback[pba->index_bg_rho_dr]; } if (pba->has_ur == _TRUE_) { rho_r += ppw->pvecback[pba->index_bg_rho_ur]; rho_nu += ppw->pvecback[pba->index_bg_rho_ur]; } if (pba->has_ncdm == _TRUE_) { for(n_ncdm=0; n_ncdm<pba->N_ncdm; n_ncdm++){ rho_r += ppw->pvecback[pba->index_bg_rho_ncdm1 + n_ncdm]; rho_nu += ppw->pvecback[pba->index_bg_rho_ncdm1 + n_ncdm]; } } class_test(rho_r == 0., ppt->error_message, "stop to avoid division by zero"); /* f_nu = Omega_nu(t_i) / Omega_r(t_i) */ fracnu = rho_nu/rho_r; /* f_g = Omega_g(t_i) / Omega_r(t_i) */ fracg = ppw->pvecback[pba->index_bg_rho_g]/rho_r; /* f_b = Omega_b(t_i) / Omega_m(t_i) */ fracb = ppw->pvecback[pba->index_bg_rho_b]/rho_m; /* f_cdm = Omega_cdm(t_i) / Omega_m(t_i) */ fraccdm = 1.-fracb; /* Omega_m(t_i) / Omega_r(t_i) */ rho_m_over_rho_r = rho_m/rho_r; /* omega = Omega_m(t_i) a(t_i) H(t_i) / sqrt(Omega_r(t_i)) = Omega_m(t_0) a(t_0) H(t_0) / sqrt(Omega_r(t_0)) assuming rho_m in a-3 and rho_r in a^-4 = (8piG/3 rho_m(t_i)) a(t_i) / sqrt(8piG/3 rho_r(t_i)) in Mpc-1 This (a priori strange) parameter is the relevant one for expressing a as a function of tau during radiation and matter domination (but not DE domination). 
Indeed the exact solution of Friedmann when there is only radiation and matter in the universe is a = [H(t_0)^2 Omega_m(t_0) a(t_0)^3 / 4] x [tau^2 + 4 tau / omega] */ om = a*rho_m/sqrt(rho_r); /* (k tau)^2, (k tau)^3 */ ktau_two=k*k*tau*tau; ktau_three=k*tau*ktau_two; /* curvature-dependent factors */ s2_squared = 1.-3.*pba->K/k/k; /** - (b) starts by setting everything in synchronous gauge. If another gauge is needed, we will perform a gauge transformation below. */ /** - --> (b.1.) adiabatic */ if ((ppt->has_ad == _TRUE_) && (index_ic == ppt->index_ic_ad)) { /* The following formulas are valid at leading order in (k*tau) and (om*tau), and order zero in tight-coupling. Identical to first order terms in CRS, except for normalization (when ppr->curvature_ini=1, tau=1: leads to factor 1/2 difference between CRS formulas with beta1=0). Identical to CAMB when om set to zero in theta_g, theta_ur, shear_ur, tau In the non-flat case the relation R=eta is still valid outside the horizon for adiabatic IC. Hence eta is still set to ppr->curvature_ini at leading order. Factors s2 appear through the solution of Einstein equations and equations of motion. */ /* photon density */ ppw->pv->y[ppw->pv->index_pt_delta_g] = - ktau_two/3. * (1.-om*tau/5.) * ppr->curvature_ini * s2_squared; /* photon velocity */ ppw->pv->y[ppw->pv->index_pt_theta_g] = - k*ktau_three/36. 
* (1.-3.*(1.+5.*fracb-fracnu)/20./(1.-fracnu)*om*tau) * ppr->curvature_ini * s2_squared; /* tighly-coupled baryons */ ppw->pv->y[ppw->pv->index_pt_delta_b] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* baryon density */ ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g]; /* baryon velocity */ if (pba->has_cdm == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_cdm] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* cdm density */ /* cdm velocity vanishes in the synchronous gauge */ } if (pba->has_dcdm == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_dcdm] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* dcdm density */ /* dcdm velocity velocity vanishes initially in the synchronous gauge */ } /* fluid (assumes wa=0, if this is not the case the fluid will catch anyway the attractor solution) */ if (pba->has_fld == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_fld] = - ktau_two/4.*(1.+pba->w0_fld+pba->wa_fld)*(4.-3.*pba->cs2_fld)/(4.-6.*(pba->w0_fld+pba->wa_fld)+3.*pba->cs2_fld) * ppr->curvature_ini * s2_squared; /* from 1004.5509 */ //TBC: curvature ppw->pv->y[ppw->pv->index_pt_theta_fld] = - k*ktau_three/4.*pba->cs2_fld/(4.-6.*(pba->w0_fld+pba->wa_fld)+3.*pba->cs2_fld) * ppr->curvature_ini * s2_squared; /* from 1004.5509 */ //TBC:curvature } if (pba->has_scf == _TRUE_) { /** - ---> Canonical field (solving for the perturbations): * initial perturbations set to zero, they should reach the attractor soon enough. * - ---> TODO: Incorporate the attractor IC from 1004.5509. * delta_phi \f$ = -(a/k)^2/\phi'(\rho + p)\theta \f$, * delta_phi_prime \f$ = a^2/\phi' \f$ (delta_rho_phi + V'delta_phi), * and assume theta, delta_rho as for perfect fluid * with \f$ c_s^2 = 1 \f$ and w = 1/3 (ASSUMES radiation TRACKING) */ ppw->pv->y[ppw->pv->index_pt_phi_scf] = 0.; /* a*a/k/k/ppw->pvecback[pba->index_bg_phi_prime_scf]*k*ktau_three/4.*1./(4.-6.*(1./3.)+3.*1.) 
* (ppw->pvecback[pba->index_bg_rho_scf] + ppw->pvecback[pba->index_bg_p_scf])* ppr->curvature_ini * s2_squared; */ ppw->pv->y[ppw->pv->index_pt_phi_prime_scf] = 0.; /* delta_fld expression * rho_scf with the w = 1/3, c_s = 1 a*a/ppw->pvecback[pba->index_bg_phi_prime_scf]*( - ktau_two/4.*(1.+1./3.)*(4.-3.*1.)/(4.-6.*(1/3.)+3.*1.)*ppw->pvecback[pba->index_bg_rho_scf] - ppw->pvecback[pba->index_bg_dV_scf]*ppw->pv->y[ppw->pv->index_pt_phi_scf])* ppr->curvature_ini * s2_squared; */ } /* all relativistic relics: ur, early ncdm, dr */ if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_) || (pba->has_dr == _TRUE_)) { delta_ur = ppw->pv->y[ppw->pv->index_pt_delta_g]; /* density of ultra-relativistic neutrinos/relics */ theta_ur = - k*ktau_three/36./(4.*fracnu+15.) * (4.*fracnu+11.+12.*s2_squared-3.*(8.*fracnu*fracnu+50.*fracnu+275.)/20./(2.*fracnu+15.)*tau*om) * ppr->curvature_ini * s2_squared; /* velocity of ultra-relativistic neutrinos/relics */ //TBC shear_ur = ktau_two/(45.+12.*fracnu) * (3.*s2_squared-1.) * (1.+(4.*fracnu-5.)/4./(2.*fracnu+15.)*tau*om) * ppr->curvature_ini;//TBC /s2_squared; /* shear of ultra-relativistic neutrinos/relics */ //TBC:0 l3_ur = ktau_three*2./7./(12.*fracnu+45.)* ppr->curvature_ini;//TBC if (pba->has_dr == _TRUE_) delta_dr = delta_ur; } /* synchronous metric perturbation eta */ //eta = ppr->curvature_ini * (1.-ktau_two/12./(15.+4.*fracnu)*(5.+4.*fracnu - (16.*fracnu*fracnu+280.*fracnu+325)/10./(2.*fracnu+15.)*tau*om)) / s2_squared; //eta = ppr->curvature_ini * s2_squared * (1.-ktau_two/12./(15.+4.*fracnu)*(15.*s2_squared-10.+4.*s2_squared*fracnu - (16.*fracnu*fracnu+280.*fracnu+325)/10./(2.*fracnu+15.)*tau*om)); eta = ppr->curvature_ini * (1.-ktau_two/12./(15.+4.*fracnu)*(5.+4.*s2_squared*fracnu - (16.*fracnu*fracnu+280.*fracnu+325)/10./(2.*fracnu+15.)*tau*om)); } /* isocurvature initial conditions taken from Bucher, Moodely, Turok 99, with just a different normalization convention for tau and the scale factor. 
[k tau] from BMT99 is left invariant because it is the ratio [k/aH]. But [Omega_i,0 tau] from BMT99 must be replaced by [frac_i*om*tau/4]. Some doubts remain about the niv formulas, that should be recheked at some point. We also checked that for bi,cdi,nid, everything coincides exactly with the CAMB formulas. */ /** - --> (b.2.) Cold dark matter Isocurvature */ if ((ppt->has_cdi == _TRUE_) && (index_ic == ppt->index_ic_cdi)) { class_test(pba->has_cdm == _FALSE_, ppt->error_message, "not consistent to ask for CDI in absence of CDM!"); ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*fraccdm*om*tau*(-2./3.+om*tau/4.); ppw->pv->y[ppw->pv->index_pt_theta_g] = -ppr->entropy_ini*fraccdm*om*ktau_two/12.; ppw->pv->y[ppw->pv->index_pt_delta_b] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g]; ppw->pv->y[ppw->pv->index_pt_delta_cdm] = ppr->entropy_ini+3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_)) { delta_ur = ppw->pv->y[ppw->pv->index_pt_delta_g]; theta_ur = ppw->pv->y[ppw->pv->index_pt_theta_g]; shear_ur = -ppr->entropy_ini*fraccdm*ktau_two*tau*om/6./(2.*fracnu+15.); } eta = -ppr->entropy_ini*fraccdm*om*tau*(1./6.-om*tau/16.); } /** - --> (b.3.) 
Baryon Isocurvature */ if ((ppt->has_bi == _TRUE_) && (index_ic == ppt->index_ic_bi)) { ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*fracb*om*tau*(-2./3.+om*tau/4.); ppw->pv->y[ppw->pv->index_pt_theta_g] = -ppr->entropy_ini*fracb*om*ktau_two/12.; ppw->pv->y[ppw->pv->index_pt_delta_b] = ppr->entropy_ini+3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g]; if (pba->has_cdm == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_cdm] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; } if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_)) { delta_ur = ppw->pv->y[ppw->pv->index_pt_delta_g]; theta_ur = ppw->pv->y[ppw->pv->index_pt_theta_g]; shear_ur = -ppr->entropy_ini*fracb*ktau_two*tau*om/6./(2.*fracnu+15.); } eta = -ppr->entropy_ini*fracb*om*tau*(1./6.-om*tau/16.); } /** - --> (b.4.) Neutrino density Isocurvature */ if ((ppt->has_nid == _TRUE_) && (index_ic == ppt->index_ic_nid)) { class_test((pba->has_ur == _FALSE_) && (pba->has_ncdm == _FALSE_), ppt->error_message, "not consistent to ask for NID in absence of ur or ncdm species!"); ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*fracnu/fracg*(-1.+ktau_two/6.); ppw->pv->y[ppw->pv->index_pt_theta_g] = -ppr->entropy_ini*fracnu/fracg*k*k*tau*(1./4.-fracb/fracg*3./16.*om*tau); ppw->pv->y[ppw->pv->index_pt_delta_b] = ppr->entropy_ini*fracnu/fracg/8.*ktau_two; ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g]; if (pba->has_cdm == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_cdm] = -ppr->entropy_ini*fracnu*fracb/fracg/80.*ktau_two*om*tau; } delta_ur = ppr->entropy_ini*(1.-ktau_two/6.); theta_ur = ppr->entropy_ini*k*k*tau/4.; shear_ur = ppr->entropy_ini*ktau_two/(4.*fracnu+15.)/2.; eta = -ppr->entropy_ini*fracnu/(4.*fracnu+15.)/6.*ktau_two; } /** - --> (b.5.) 
Neutrino velocity Isocurvature */ if ((ppt->has_niv == _TRUE_) && (index_ic == ppt->index_ic_niv)) { class_test((pba->has_ur == _FALSE_) && (pba->has_ncdm == _FALSE_), ppt->error_message, "not consistent to ask for NIV in absence of ur or ncdm species!"); ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*k*tau*fracnu/fracg* (1. - 3./16.*fracb*(2.+fracg)/fracg*om*tau); /* small diff wrt camb */ ppw->pv->y[ppw->pv->index_pt_theta_g] = ppr->entropy_ini*fracnu/fracg*3./4.*k* (-1.+3./4.*fracb/fracg*om*tau+3./16.*om*om*tau*tau*fracb/fracg/fracg*(fracg-3.*fracb)+ktau_two/6.); ppw->pv->y[ppw->pv->index_pt_delta_b] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* small diff wrt camb */ ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g]; if (pba->has_cdm == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_cdm] = -ppr->entropy_ini*9./64.*fracnu*fracb/fracg*k*tau*om*tau; } delta_ur = -ppr->entropy_ini*k*tau*(1.+3./16.*fracb*fracnu/fracg*om*tau); /* small diff wrt camb */ theta_ur = ppr->entropy_ini*3./4.*k*(1. - 1./6.*ktau_two*(4.*fracnu+9.)/(4.*fracnu+5.)); shear_ur = ppr->entropy_ini/(4.*fracnu+15.)*k*tau*(1. + 3.*om*tau*fracnu/(4.*fracnu+15.)); /* small diff wrt camb */ eta = ppr->entropy_ini*fracnu*k*tau*(-1./(4.*fracnu+5.) + (-3./64.*fracb/fracg+15./4./(4.*fracnu+15.)/(4.*fracnu+5.)*om*tau)); /* small diff wrt camb */ } /** - (c) If the needed gauge is really the synchronous gauge, we need to affect the previously computed value of eta to the actual variable eta */ if (ppt->gauge == synchronous) { ppw->pv->y[ppw->pv->index_pt_eta] = eta; } /** - (d) If the needed gauge is the newtonian gauge, we must compute alpha and then perform a gauge transformation for each variable */ if (ppt->gauge == newtonian) { /* alpha is like in Ma & Bertschinger: (h'+6 eta')/(2k^2). 
We obtain it from the first two Einstein equations: alpha = [eta + 3/2 (a'/a)^2 (delta_rho/rho_c) / k^2 /s_2^2 + 3/2 (a'/a)^3 3 ((rho+p)theta/rho_c) / k^4 / s_2^2] / (a'/a) = [eta + 3/2 (a'/a)^2 / k^2 /s_2^2 {delta_tot + 3 (a'/a) /k^2 velocity_tot}] / (a'/a) with delta_tot = (delta_rho/rho_c) = [rho_r delta_r + rho_m delta_m] / (rho_r + rho_m) = [delta_r + (rho_m/rho_r) delta_m] / (1 + rho_m/rho_r) = [(f_g delta_g + f_nu delta_nu) + (rho_m/rho_r) (f_b delta_b + f_cdm delta_cdm)] / (1 + rho_m/rho_r) velocity_tot = ((rho+p)theta/rho_c) = [(4/3) rho_r theta_r + rho_m theta_m] / (rho_r + rho_m) = [(4/3) theta_r + (rho_m/rho_r) theta_m] / (1 + rho_m/rho_r) = [(4/3) (f_g theta_g + f_nu theta_nu) + (rho_m/rho_r) (f_b delta_b + f_cdm 0)] / (1 + rho_m/rho_r) */ if (pba->has_cdm == _TRUE_) delta_cdm = ppw->pv->y[ppw->pv->index_pt_delta_cdm]; else if (pba->has_dcdm == _TRUE_) delta_cdm = ppw->pv->y[ppw->pv->index_pt_delta_dcdm]; else delta_cdm=0.; // note: if there are no neutrinos, fracnu, delta_ur and theta_ur below will consistently be zero. 
delta_tot = (fracg*ppw->pv->y[ppw->pv->index_pt_delta_g]+fracnu*delta_ur+rho_m_over_rho_r*(fracb*ppw->pv->y[ppw->pv->index_pt_delta_b]+fraccdm*delta_cdm))/(1.+rho_m_over_rho_r); velocity_tot = ((4./3.)*(fracg*ppw->pv->y[ppw->pv->index_pt_theta_g]+fracnu*theta_ur) + rho_m_over_rho_r*fracb*ppw->pv->y[ppw->pv->index_pt_theta_b])/(1.+rho_m_over_rho_r); alpha = (eta + 3./2.*a_prime_over_a*a_prime_over_a/k/k/s2_squared*(delta_tot + 3.*a_prime_over_a/k/k*velocity_tot))/a_prime_over_a; ppw->pv->y[ppw->pv->index_pt_phi] = eta - a_prime_over_a*alpha; ppw->pv->y[ppw->pv->index_pt_delta_g] -= 4.*a_prime_over_a*alpha; ppw->pv->y[ppw->pv->index_pt_theta_g] += k*k*alpha; ppw->pv->y[ppw->pv->index_pt_delta_b] -= 3.*a_prime_over_a*alpha; ppw->pv->y[ppw->pv->index_pt_theta_b] += k*k*alpha; if (pba->has_cdm == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_cdm] -= 3.*a_prime_over_a*alpha; ppw->pv->y[ppw->pv->index_pt_theta_cdm] = k*k*alpha; } if (pba->has_dcdm == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_dcdm] += (-3.*a_prime_over_a - a*pba->Gamma_dcdm)*alpha; ppw->pv->y[ppw->pv->index_pt_theta_dcdm] = k*k*alpha; } /* fluid */ if (pba->has_fld == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_fld] += 3*(1.+pba->w0_fld+pba->wa_fld)*a_prime_over_a*alpha; ppw->pv->y[ppw->pv->index_pt_theta_fld] += k*k*alpha; } /* scalar field: check */ if (pba->has_scf == _TRUE_) { alpha_prime = 0.0; /* - 2. 
* a_prime_over_a * alpha + eta - 4.5 * (a2/k2) * ppw->rho_plus_p_shear; */ ppw->pv->y[ppw->pv->index_pt_phi_scf] += alpha*ppw->pvecback[pba->index_bg_phi_prime_scf]; ppw->pv->y[ppw->pv->index_pt_phi_prime_scf] += (-2.*a_prime_over_a*alpha*ppw->pvecback[pba->index_bg_phi_prime_scf] -a*a* dV_scf(pba,ppw->pvecback[pba->index_bg_phi_scf])*alpha +ppw->pvecback[pba->index_bg_phi_prime_scf]*alpha_prime); } if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_) || (pba->has_dr == _TRUE_)) { delta_ur -= 4.*a_prime_over_a*alpha; theta_ur += k*k*alpha; /* shear and l3 are gauge invariant */ if (pba->has_dr == _TRUE_) delta_dr += (-4.*a_prime_over_a + a*pba->Gamma_dcdm*ppw->pvecback[pba->index_bg_rho_dcdm]/ppw->pvecback[pba->index_bg_rho_dr])*alpha; } } /* end of gauge transformation to newtonian gauge */ /** - (e) In any gauge, we should now implement the relativistic initial conditions in ur and ncdm variables */ if (pba->has_ur == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_ur] = delta_ur; ppw->pv->y[ppw->pv->index_pt_theta_ur] = theta_ur; ppw->pv->y[ppw->pv->index_pt_shear_ur] = shear_ur; ppw->pv->y[ppw->pv->index_pt_l3_ur] = l3_ur; } if (pba->has_ncdm == _TRUE_) { idx = ppw->pv->index_pt_psi0_ncdm1; for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q++) { q = pba->q_ncdm[n_ncdm][index_q]; epsilon = sqrt(q*q+a*a*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]); ppw->pv->y[idx] = -0.25 * delta_ur * pba->dlnf0_dlnq_ncdm[n_ncdm][index_q]; ppw->pv->y[idx+1] = -epsilon/3./q/k*theta_ur* pba->dlnf0_dlnq_ncdm[n_ncdm][index_q]; ppw->pv->y[idx+2] = -0.5 * shear_ur * pba->dlnf0_dlnq_ncdm[n_ncdm][index_q]; ppw->pv->y[idx+3] = -0.25 * l3_ur * pba->dlnf0_dlnq_ncdm[n_ncdm][index_q]; //Jump to next momentum bin: idx += (ppw->pv->l_max_ncdm[n_ncdm]+1); } } } if (pba->has_dr == _TRUE_) { f_dr = pow(pow(a/pba->a_today,2)/pba->H0,2)*ppw->pvecback[pba->index_bg_rho_dr]; ppw->pv->y[ppw->pv->index_pt_F0_dr] = delta_dr*f_dr; 
ppw->pv->y[ppw->pv->index_pt_F0_dr+1] = 4./(3.*k)*theta_ur*f_dr; ppw->pv->y[ppw->pv->index_pt_F0_dr+2] = 2.*shear_ur*f_dr; ppw->pv->y[ppw->pv->index_pt_F0_dr+3] = l3_ur*f_dr; } } /** --> For tensors */ if (_tensors_) { /** tensor initial conditions take into account the fact that scalar (resp. tensor) \f$ C_l\f$'s are related to the real space power spectrum of curvature (resp. of the tensor part of metric perturbations) \f[ <R(x) R(x)> \ \ \sum_{ij} <h_{ij}(x) h^{ij}(x)> \f] In momentum space it is conventional to use the modes R(k) and h(k) where the quantity h obeying to the equation of propagation: \f[ h'' + \frac{2a'}{a} h + [k2+2K] h = 12\pi Ga2 (\rho+p) \sigma = 8\pi Ga2 p \pi \f] and the power spectra in real space and momentum space are related through: \f[ <R(x) R(x)> = \int \frac{dk}{k} \left[ \frac{k^3}{2\pi^2} <R(k)R(k)^*>\right] = \int \frac{dk}{k} \mathcal{P}_R(k) \f] \f[\sum_{ij} <h_{ij}(x) h^{ij}(x)> = \frac{dk}{k} \left[ \frac{k^3}{2\pi^2} F\left(\frac{k^2}{K}\right) <h(k)h(k)^*>\right] = \int \frac{dk}{k} F\left(\frac{k^2}{K}\right) \mathcal{P}_h(k) \f] where \f$ \mathcal{P}_R\f$ and \f$ \mathcal{P}_h\f$ are the dimensionless spectrum of curvature R, and F is a function of k2/K, where K is the curvature parameter. F is equal to one in flat space (K=0), and coming from the contraction of the laplacian eigentensor \f$ Q_{ij}\f$ with itself. We will give F explicitly below. 
Similarly the scalar (S) and tensor (T) \f$ C_l\f$'s are given by \f[ C_l^S = 4\pi \int \frac{dk}{k} [\Delta_l^S(q)]^2 \mathcal{P}_R(k) \f] \f[ C_l^T = 4\pi \int \frac{dk}{k} [\Delta_l^T(q)]^2 F\left(\frac{k^2}{K}\right) \mathcal{P}_h(k) \f] The usual convention for the tensor-to-scalar ratio \f$ r = A_t / A_s \f$ at pivot scale = 16 epsilon in single-field inflation is such that for constant \f$ \mathcal{P}_R(k)\f$ and \f$ \mathcal{P}_h(k)\f$, \f[ r = 6 \frac{\mathcal{P}_h(k)}{\mathcal{P}_R(k)} \f] so \f[ \mathcal{P}_h(k) = \frac{\mathcal{P}_R(k) r}{6} = \frac{A_s r}{6} = \frac{A_t}{6} \f] A priori it would make sense to say that for a power-law primordial spectrum there is an extra factor \f$ (k/k_{pivot})^{n_t} \f$ (and eventually running and so on and so forth...) However it has been shown that the minimal models of inflation in a negatively curved bubble lead to \f$ \mathcal{P}_h(k)=\tanh(\pi*\nu/2)\f$. In open models it is customary to define the tensor tilt in a non-flat universe as a deviation from this behavior rather than from true scale-invariance in the above sense. Hence we should have \f[ \mathcal{P}_h(k) = \frac{A_t}{6} [ \tanh(\pi*\frac{\nu}{2})] (k/k_{pivot})^{(n_t+...)}\f] where the brackets \f[ [...] 
\f] mean "if K<0" Then \f[ C_l^T = 4\pi \int \frac{dk}{k} [\Delta_l^T(q)]^2 F\left(\frac{k^2}{K}\right) \frac{A_t}{6} [\tanh(\pi*\frac{\nu}{2})] (k/k_{pivot})^{(n_t+...)} \f] In the code, it is then a matter of choice to write: - In the primordial module: \f$ \mathcal{P}_h(k) = \frac{A_t}{6} \tanh{(\pi*\frac{\nu}{2})} (k/k^*)^{n_T}\f$ - In the perturbation initial conditions: \f$ h = 1\f$ - In the spectra module: \f$ C_l^T = \frac{4}{\pi} \int \frac{dk}{k} [\Delta_l^T(q)]^2 F\left(\frac{k^2}{K}\right) \mathcal{P}_h(k) \f$ or: - In the primordial module: \f$ \mathcal{P}_h(k) = A_t (k/k^*)^{n_T} \f$ - In the perturbation initial conditions: \f$ h = \sqrt{[F\left(\frac{k^2}{K}\right) / 6] \tanh{(\pi*\frac{\nu}{2})}} \f$ - In the spectra module: \f$ C_l^T = \frac{4}{\pi} \int \frac{dk}{k} [\Delta_l^T(q)]^2 \mathcal{P}_h(k) \f$ We choose this last option, such that the primordial and spectra module differ minimally in flat and non-flat space. Then we must impose \f[ h = \sqrt{\left(\frac{F}{6}\right) \tanh{(\pi*\frac{\nu}{2})}} \f] The factor F is found to be given by: \f[ \sum_{ij}<h_{ij}(x) h^{ij}(x)> = \int \frac{dk}{k} \frac{k2(k2-K)}{(k2+3K)(k2+2K)} \mathcal{P}_h(k) \f] Introducing as usual \f$ q2 = k2 - 3K \f$ and using qdq = kdk this gives \f[ \sum_{ij}<h_{ij}(x) h^{ij}(x)> = \int \frac{dk}{k} \frac{(q2-3K)(q2-4K)}{q2(q2-K)} \mathcal{P}_h(k) \f] Using qdq = kdk this is equivalent to \f[ \sum_{ij}<h_{ij}(x) h^{ij}(x)> = \int \frac{dq}{q} \frac{q2-4K}{q2-K} \mathcal{P}_h(k(q)) \f] Finally, introducing \f$ \nu=q/\sqrt{|K|}\f$ and sgnK=SIGN(k)\f$=\pm 1\f$, this could also be written \f[ \sum_{ij}<h_{ij}(x) h^{ij}(x)> = \int \frac{d\nu}{\nu} \frac{(\nu2-4sgnK)}{(\nu2-sgnK)} \mathcal{P}_h(k(\nu)) \f] Equation (43,44) of Hu, Seljak, White, Zaldarriaga is equivalent to absorbing the above factor \f$ (\nu2-4sgnK)/(\nu2-sgnK)\f$ in the definition of the primordial spectrum. 
Since the initial condition should be written in terms of k rather
   than nu, they should read

   \f[ h = \sqrt{ [k2(k2-K)]/[(k2+3K)(k2+2K)] / 6 * \tanh{(\pi*\frac{\nu}{2})} } \f]

   We leave the freedom to multiply by an arbitrary number
   ppr->gw_ini. The standard convention corresponding to standard
   definitions of r, \f$ A_T\f$, \f$ n_T\f$ is however ppr->gw_ini=1.

   * */

    if (index_ic == ppt->index_ic_ten) {
      /* normalization of h such that the primordial spectrum module
         carries the full tensor amplitude (see long comment above) */
      ppw->pv->y[ppw->pv->index_pt_gw] = ppr->gw_ini/_SQRT6_;
    }

    k2 = k*k;

    /* non-flat case: multiply by sqrt of the curvature factor
       F = k2(k2-K)/[(k2+3K)(k2+2K)] discussed above */
    if (pba->sgnK != 0) {
      ppw->pv->y[ppw->pv->index_pt_gw] *= sqrt(k2*(k2-pba->K)/(k2+3.*pba->K)/(k2+2.*pba->K));
    }

    /* open universe: extra tanh(pi*nu/2) factor; modes with k2+3K<0
       (super-curvature for tensors) are set to zero */
    if (pba->sgnK == -1) {
      if (k*k+3*pba->K >= 0.) {
        ppw->pv->y[ppw->pv->index_pt_gw] *= sqrt(tanh(_PI_/2.*sqrt(k2+3*pba->K)/sqrt(-pba->K)));
      }
      else {
        ppw->pv->y[ppw->pv->index_pt_gw] = 0.;
      }
    }

  }

  return _SUCCESS_;
}

/**
 * Evaluate background/thermodynamics at \f$ \tau \f$, infer useful flags / time scales for integrating perturbations.
 *
 * Evaluate background quantities at \f$ \tau \f$, as well as
 * thermodynamics for scalar mode; infer useful flags and time scales
 * for integrating the perturbations:
 * - check whether tight-coupling approximation is needed.
 * - check whether radiation (photons, massless neutrinos...) perturbations are needed.
 * - choose step of integration: step = ppr->perturb_integration_stepsize * min_time_scale,
 *   where min_time_scale = smallest time scale involved in the
 *   equations. There are three time scales to compare:
 * -# that of recombination, \f$ \tau_c = 1/\kappa' \f$
 * -# Hubble time scale, \f$ \tau_h = a/a' \f$
 * -# Fourier mode, \f$ \tau_k = 1/k \f$
 *
 * So, in general, min_time_scale = \f$ \min(\tau_c, \tau_b, \tau_h, \tau_k) \f$.
 *
 * However, if \f$ \tau_c \ll \tau_h \f$ and \f$ \tau_c
 * \ll \tau_k \f$, we can use the tight-coupling regime for photons
 * and write equations in such way that the time scale \f$
 * \tau_c \f$ becomes irrelevant (no effective mass term in \f$
 * 1/\tau_c \f$). Then, the smallest
 * scale in the equations is only \f$ \min(\tau_h, \tau_k) \f$.
 * In practice, it is sufficient to use only the condition \f$ \tau_c \ll \tau_h \f$.
 *
 * Also, if \f$ \rho_{matter} \gg \rho_{radiation} \f$ and \f$ k \gg
 * aH \f$, we can switch off radiation perturbations (i.e. switch on
 * the free-streaming approximation) and then the smallest scale is
 * simply \f$ \tau_h \f$.
 *
 * @param ppr        Input: pointer to precision structure
 * @param pba        Input: pointer to background structure
 * @param pth        Input: pointer to thermodynamics structure
 * @param ppt        Input: pointer to the perturbation structure
 * @param index_md   Input: index of mode under consideration (scalar/.../tensor)
 * @param k          Input: wavenumber
 * @param tau        Input: conformal time
 * @param ppw        Input/Output: in output contains the approximation to be used at this time
 * @return the error status
 */

int perturb_approximations(
                           struct precision * ppr,
                           struct background * pba,
                           struct thermo * pth,
                           struct perturbs * ppt,
                           int index_md,
                           double k,
                           double tau,
                           struct perturb_workspace * ppw
                           ) {

  /** Summary: */

  /** - define local variables */

  /* (a) time scale of Fourier mode, \f$ \tau_k = 1/k \f$ */
  double tau_k;
  /* (b) time scale of expansion, \f$ \tau_h = a/a' \f$ */
  double tau_h;
  /* (c) time scale of recombination, \f$ \tau_{\gamma} = 1/\kappa' \f$ */
  double tau_c;

  /** - compute Fourier mode time scale = \f$ \tau_k = 1/k \f$ */

  class_test(k == 0.,
             ppt->error_message,
             "stop to avoid division by zero");

  tau_k = 1./k;

  /** - evaluate background quantities with background_at_tau() and
      Hubble time scale \f$ \tau_h = a/a' \f$ */

  class_call(background_at_tau(pba,tau, pba->normal_info, ppw->inter_mode, &(ppw->last_index_back), ppw->pvecback),
             pba->error_message,
             ppt->error_message);

  class_test(ppw->pvecback[pba->index_bg_H]*ppw->pvecback[pba->index_bg_a] == 0.,
             ppt->error_message,
             "aH=0, stop to avoid division by zero");

  tau_h = 1./(ppw->pvecback[pba->index_bg_H]*ppw->pvecback[pba->index_bg_a]);

  /** - for scalar modes: */

  if (_scalars_) {

    /** - --> (a) evaluate thermodynamical quantities with thermodynamics_at_z() */

    class_call(thermodynamics_at_z(pba,
                                   pth,
                                   1./ppw->pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                   ppw->inter_mode,
                                   &(ppw->last_index_thermo),
                                   ppw->pvecback,
                                   ppw->pvecthermo),
               pth->error_message,
               ppt->error_message);

    /** - ---> (b.1.) if \f$ \kappa'=0 \f$, recombination is finished; tight-coupling approximation must be off */

    if (ppw->pvecthermo[pth->index_th_dkappa] == 0.) {
      ppw->approx[ppw->index_ap_tca] = (int)tca_off;
    }

    /** - ---> (b.2.) if \f$ \kappa' \neq 0 \f$, recombination is not finished: check tight-coupling approximation */

    else {

      /** - ----> (b.2.a) compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */

      tau_c = 1./ppw->pvecthermo[pth->index_th_dkappa];

      class_test(tau_c < 0.,
                 ppt->error_message,
                 "tau_c = 1/kappa' should always be positive unless there is something wrong in the thermodynamics module. However you have here tau_c=%e at z=%e, conformal time=%e x_e=%e. (This could come from the interpolation of a too poorly sampled reionisation history?).\n",
                 tau_c,
                 1./ppw->pvecback[pba->index_bg_a]-1.,
                 tau,
                 ppw->pvecthermo[pth->index_th_xe]);

      /** - ----> (b.2.b) check whether tight-coupling approximation should be on */

      if ((tau_c/tau_h < ppr->tight_coupling_trigger_tau_c_over_tau_h) &&
          (tau_c/tau_k < ppr->tight_coupling_trigger_tau_c_over_tau_k)) {
        ppw->approx[ppw->index_ap_tca] = (int)tca_on;
      }
      else {
        ppw->approx[ppw->index_ap_tca] = (int)tca_off;
      }

    }

    /** - --> (c) free-streaming approximations */

    if ((tau/tau_k > ppr->radiation_streaming_trigger_tau_over_tau_k) &&
        (tau > pth->tau_free_streaming) &&
        (ppr->radiation_streaming_approximation != rsa_none)) {

      ppw->approx[ppw->index_ap_rsa] = (int)rsa_on;
    }
    else {
      ppw->approx[ppw->index_ap_rsa] = (int)rsa_off;
    }

    /* ultra-relativistic (massless neutrino) fluid approximation */
    if (pba->has_ur == _TRUE_) {

      if ((tau/tau_k > ppr->ur_fluid_trigger_tau_over_tau_k) &&
          (ppr->ur_fluid_approximation != ufa_none)) {

        ppw->approx[ppw->index_ap_ufa] = (int)ufa_on;
      }
      else {
        ppw->approx[ppw->index_ap_ufa] = (int)ufa_off;
      }
    }

    /* non-cold dark matter fluid approximation */
    if (pba->has_ncdm == _TRUE_) {

      if ((tau/tau_k > ppr->ncdm_fluid_trigger_tau_over_tau_k) &&
          (ppr->ncdm_fluid_approximation != ncdmfa_none)) {

        ppw->approx[ppw->index_ap_ncdmfa] = (int)ncdmfa_on;
      }
      else {
        ppw->approx[ppw->index_ap_ncdmfa] = (int)ncdmfa_off;
      }
    }
  }

  /** - for tensor modes: */

  if (_tensors_) {

    /** - --> (a) evaluate thermodynamical quantities with thermodynamics_at_z() */

    class_call(thermodynamics_at_z(pba,
                                   pth,
                                   1./ppw->pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                   ppw->inter_mode,
                                   &(ppw->last_index_thermo),
                                   ppw->pvecback,
                                   ppw->pvecthermo),
               pth->error_message,
               ppt->error_message);

    /** - ---> (b.1.) if \f$ \kappa'=0 \f$, recombination is finished; tight-coupling approximation must be off */

    if (ppw->pvecthermo[pth->index_th_dkappa] == 0.) {
      ppw->approx[ppw->index_ap_tca] = (int)tca_off;
    }

    /** - ---> (b.2.) if \f$ \kappa' \neq 0 \f$, recombination is not finished: check tight-coupling approximation */

    else {

      /** - ----> (b.2.a) compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */

      tau_c = 1./ppw->pvecthermo[pth->index_th_dkappa];

      /** - ----> (b.2.b) check whether tight-coupling approximation should be on */

      if ((tau_c/tau_h < ppr->tight_coupling_trigger_tau_c_over_tau_h) &&
          (tau_c/tau_k < ppr->tight_coupling_trigger_tau_c_over_tau_k)) {
        ppw->approx[ppw->index_ap_tca] = (int)tca_on;
      }
      else {
        ppw->approx[ppw->index_ap_tca] = (int)tca_off;
      }
    }

    /* radiation streaming approximation (same trigger as for scalars) */
    if ((tau/tau_k > ppr->radiation_streaming_trigger_tau_over_tau_k) &&
        (tau > pth->tau_free_streaming) &&
        (ppr->radiation_streaming_approximation != rsa_none)) {

      ppw->approx[ppw->index_ap_rsa] = (int)rsa_on;
    }
    else {
      ppw->approx[ppw->index_ap_rsa] = (int)rsa_off;
    }
  }

  return _SUCCESS_;
}

/**
 * Compute typical timescale over which the perturbation equations
 * vary. Some integrators (e.g. Runge-Kutta) benefit from calling this
 * routine at each step in order to adapt the next step.
 *
 * This is one of the few functions in the code which is passed to the generic_integrator() routine.
* Since generic_integrator() should work with functions passed from various modules, the format of the arguments
 * is a bit special:
 * - fixed parameters and workspaces are passed through a generic pointer.
 *   generic_integrator() doesn't know the content of this pointer.
 * - the error management is a bit special: errors are not written as usual to pth->error_message, but to a generic
 *   error_message passed in the list of arguments.
 *
 * @param tau                      Input: conformal time
 * @param parameters_and_workspace Input: fixed parameters (e.g. indices), workspace, approximation used, etc.
 * @param timescale                Output: perturbation variation timescale (given the approximation used)
 * @param error_message            Output: error message
 */

int perturb_timescale(
                      double tau,
                      void * parameters_and_workspace,
                      double * timescale,
                      ErrorMsg error_message
                      ) {

  /** Summary: */

  /** - define local variables */

  /* (a) time scale of Fourier mode, \f$ \tau_k = 1/k \f$ */
  double tau_k;
  /* (b) time scale of expansion, \f$ \tau_h = a/a' \f$ */
  double tau_h;
  /* (c) time scale of recombination, \f$ \tau_{\gamma} = 1/\kappa' \f$ */
  double tau_c;

  /* various pointers allowing to extract the fields of the
     parameter_and_workspace input structure */
  struct perturb_parameters_and_workspace * pppaw;
  struct background * pba;
  struct thermo * pth;
  struct perturbs * ppt;
  struct perturb_workspace * ppw;
  double * pvecback;
  double * pvecthermo;

  /** - extract the fields of the parameter_and_workspace input structure */
  pppaw = parameters_and_workspace;
  pba = pppaw->pba;
  pth = pppaw->pth;
  ppt = pppaw->ppt;
  ppw = pppaw->ppw;
  pvecback = ppw->pvecback;
  pvecthermo = ppw->pvecthermo;

  /** - compute Fourier mode time scale = \f$ \tau_k = 1/k \f$ */

  class_test(pppaw->k == 0.,
             ppt->error_message,
             "stop to avoid division by zero");

  tau_k = 1./pppaw->k;

  /** - evaluate background quantities with background_at_tau() and
      Hubble time scale \f$ \tau_h = a/a' \f$ */

  class_call(background_at_tau(pba,tau, pba->normal_info, ppw->inter_mode, &(ppw->last_index_back), pvecback),
             pba->error_message,
             error_message);

  class_test(pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a] == 0.,
             error_message,
             "aH=0, stop to avoid division by zero");

  tau_h = 1./(pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]);

  /** - for scalars modes: */

  if ((ppt->has_scalars == _TRUE_) && (pppaw->index_md == ppt->index_md_scalars)) {

    *timescale = tau_h;

    /* unless radiation free-streams (and no ncdm is present), the
       Fourier-mode timescale also matters */
    if ((ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) || (pba->has_ncdm == _TRUE_))
      *timescale = MIN(tau_k,*timescale);

    if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {

      class_call(thermodynamics_at_z(pba,
                                     pth,
                                     1./pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                     ppw->inter_mode,
                                     &(ppw->last_index_thermo),
                                     pvecback,
                                     pvecthermo),
                 pth->error_message,
                 error_message);

      if (pvecthermo[pth->index_th_dkappa] != 0.) {

        /** - --> compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */

        tau_c = 1./pvecthermo[pth->index_th_dkappa];

        *timescale = MIN(tau_c,*timescale);

      }
    }
  }

  /** - for vector modes: */

  if ((ppt->has_vectors == _TRUE_) && (pppaw->index_md == ppt->index_md_vectors)) {

    *timescale = MIN(tau_h,tau_k);

    if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {

      class_call(thermodynamics_at_z(pba,
                                     pth,
                                     1./pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                     ppw->inter_mode,
                                     &(ppw->last_index_thermo),
                                     pvecback,
                                     pvecthermo),
                 pth->error_message,
                 error_message);

      if (pvecthermo[pth->index_th_dkappa] != 0.) {

        /** - --> compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */

        tau_c = 1./pvecthermo[pth->index_th_dkappa];

        *timescale = MIN(tau_c,*timescale);

      }
    }
  }

  /** - for tensor modes: */

  if ((ppt->has_tensors == _TRUE_) && (pppaw->index_md == ppt->index_md_tensors)) {

    *timescale = MIN(tau_h,tau_k);

    if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {

      class_call(thermodynamics_at_z(pba,
                                     pth,
                                     1./pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                     ppw->inter_mode,
                                     &(ppw->last_index_thermo),
                                     pvecback,
                                     pvecthermo),
                 pth->error_message,
                 error_message);

      if (pvecthermo[pth->index_th_dkappa] != 0.) {

        /** - --> compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */

        tau_c = 1./pvecthermo[pth->index_th_dkappa];

        *timescale = MIN(tau_c,*timescale);

      }
    }
  }

  return _SUCCESS_;
}

/**
 * Compute metric perturbations (those not integrated over time) using Einstein equations
 *
 * @param ppr        Input: pointer to precision structure
 * @param pba        Input: pointer to background structure
 * @param pth        Input: pointer to thermodynamics structure
 * @param ppt        Input: pointer to the perturbation structure
 * @param index_md   Input: index of mode under consideration (scalar/.../tensor)
 * @param k          Input: wavenumber
 * @param tau        Input: conformal time
 * @param y          Input: vector of perturbations (those integrated over time) (already allocated)
 * @param ppw        Input/Output: in output contains the updated metric perturbations
 * @return the error status
 */

int perturb_einstein(
                     struct precision * ppr,
                     struct background * pba,
                     struct thermo * pth,
                     struct perturbs * ppt,
                     int index_md,
                     double k,
                     double tau,
                     double * y,
                     struct perturb_workspace * ppw
                     ) {

  /** Summary: */

  /** - define local variables */

  double k2,a,a2,a_prime_over_a;
  double s2_squared;
  double shear_g = 0.;

  /** - define wavenumber and scale factor related quantities */

  k2 = k*k;
  a = ppw->pvecback[pba->index_bg_a];
  a2 = a * a;
  a_prime_over_a = ppw->pvecback[pba->index_bg_H]*a;
  s2_squared = 1.-3.*pba->K/k2;
/** - sum up perturbations from all species */
  class_call(perturb_total_stress_energy(ppr,pba,pth,ppt,index_md,k,y,ppw),
             ppt->error_message,
             ppt->error_message);

  /** - for scalar modes: */

  if (_scalars_) {

    /** - --> infer metric perturbations from Einstein equations */

    /* newtonian gauge */
    if (ppt->gauge == newtonian) {

      /* in principle we could get phi from the constraint equation:

         ppw->pvecmetric[ppw->index_mt_phi] = -1.5 * (a2/k2/k2/s2/s2) * (k2 * delta_rho + 3.*a_prime_over_a * rho_plus_p_theta);

         with s2_squared = sqrt(1-3K/k2) = ppw->s_l[2]*ppw->s_l[2]

         This was the case in class v1.3. However the integration is
         more stable if we treat phi as a dynamical variable
         y[ppw->pv->index_pt_phi], which derivative is given by the
         second equation below (credits to Guido Walter Pettinari). */

      /* equation for psi */
      ppw->pvecmetric[ppw->index_mt_psi] = y[ppw->pv->index_pt_phi] - 4.5 * (a2/k2) * ppw->rho_plus_p_shear;

      /* equation for phi' */
      ppw->pvecmetric[ppw->index_mt_phi_prime] = -a_prime_over_a * ppw->pvecmetric[ppw->index_mt_psi] + 1.5 * (a2/k2) * ppw->rho_plus_p_theta;

      /* eventually, infer radiation streaming approximation for
         gamma and ur (this is exactly the right place to do it
         because the result depends on h_prime) */

      if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {

        class_call(perturb_rsa_delta_and_theta(ppr,pba,pth,ppt,k,y,a_prime_over_a,ppw->pvecthermo,ppw),
                   ppt->error_message,
                   ppt->error_message);
      }
    }

    /* synchronous gauge */
    if (ppt->gauge == synchronous) {

      /* first equation involving total density fluctuation */
      ppw->pvecmetric[ppw->index_mt_h_prime] =
        ( k2 * s2_squared * y[ppw->pv->index_pt_eta] + 1.5 * a2 * ppw->delta_rho)/(0.5*a_prime_over_a);  /* h' */

      /* eventually, infer radiation streaming approximation for
         gamma and ur (this is exactly the right place to do it
         because the result depends on h_prime) */

      if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {

        class_call(perturb_rsa_delta_and_theta(ppr,pba,pth,ppt,k,y,a_prime_over_a,ppw->pvecthermo,ppw),
                   ppt->error_message,
                   ppt->error_message);

        /* update total theta given rsa approximation results */
        ppw->rho_plus_p_theta += 4./3.*ppw->pvecback[pba->index_bg_rho_g]*ppw->rsa_theta_g;

        if (pba->has_ur == _TRUE_) {
          ppw->rho_plus_p_theta += 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*ppw->rsa_theta_ur;
        }
      }

      /* second equation involving total velocity */
      ppw->pvecmetric[ppw->index_mt_eta_prime] = (1.5 * a2 * ppw->rho_plus_p_theta + 0.5 * pba->K * ppw->pvecmetric[ppw->index_mt_h_prime])/k2/s2_squared;  /* eta' */

      /* third equation involving total pressure */
      ppw->pvecmetric[ppw->index_mt_h_prime_prime] =
        - 2. * a_prime_over_a * ppw->pvecmetric[ppw->index_mt_h_prime]
        + 2. * k2 * s2_squared * y[ppw->pv->index_pt_eta]
        - 9. * a2 * ppw->delta_p;

      /* alpha = (h'+6eta')/2k^2 */
      ppw->pvecmetric[ppw->index_mt_alpha] = (ppw->pvecmetric[ppw->index_mt_h_prime] + 6.*ppw->pvecmetric[ppw->index_mt_eta_prime])/2./k2;

      /* eventually, infer first-order tight-coupling approximation for photon
         shear, then correct the total shear */
      if (ppw->approx[ppw->index_ap_tca] == (int)tca_on) {

        shear_g = 16./45./ppw->pvecthermo[pth->index_th_dkappa]*(y[ppw->pv->index_pt_theta_g]+k2*ppw->pvecmetric[ppw->index_mt_alpha]);

        ppw->rho_plus_p_shear += 4./3.*ppw->pvecback[pba->index_bg_rho_g]*shear_g;

      }

      /* fourth equation involving total shear */
      ppw->pvecmetric[ppw->index_mt_alpha_prime] = //TBC
        - 2. * a_prime_over_a * ppw->pvecmetric[ppw->index_mt_alpha] + y[ppw->pv->index_pt_eta]
        - 4.5 * (a2/k2) * ppw->rho_plus_p_shear;

    }

    /* transform (delta_m, theta_m) of the current gauge into
       gauge-independent variables (you could comment this out if you
       really want gauge-dependent results) */

    if (ppt->has_source_delta_m == _TRUE_) {

      ppw->delta_m += 3. *ppw->pvecback[pba->index_bg_a]*ppw->pvecback[pba->index_bg_H] * ppw->theta_m/k2;
      // note: until 2.4.3 there was a typo, the factor was (-2 H'/H)
      // instead of (3 aH). There is the same typo in the CLASSgal
      // paper 1307.1459v1,v2,v3. It came from a confusion between
      // (1+w_total) and (1+w_matter)=1 [the latter is the relevant
      // one here].
      //
      // note2: at this point this gauge-invariant variable is only
      // valid if all matter components are pressureless and
      // stable. This relation will be generalized soon to the case
      // of decaying dark matter.
    }

    if (ppt->has_source_theta_m == _TRUE_) {
      if (ppt->gauge == synchronous) {
        ppw->theta_m += ppw->pvecmetric[ppw->index_mt_alpha]*k2;
      }
    }
  }

  /** - for vector modes */

  if (_vectors_) {

    if (ppt->gauge == newtonian) {

      ppw->pvecmetric[ppw->index_mt_V_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_V] - 3.*ppw->vector_source_pi/k;

    }

    if (ppt->gauge == synchronous) {

      // assuming vector_source_pi = p_class a^2 pi_T^{(1)} and vector_source_v = (rho_class+p_class)a^2 v^{(1)}

      // from Hu and White:
      ppw->pvecmetric[ppw->index_mt_hv_prime_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_hv_prime] - 3.*ppw->vector_source_pi/k2;

      // what we suspect:
      //ppw->pvecmetric[ppw->index_mt_hv_prime_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_hv_prime] - 3.*ppw->vector_source_pi;

      // if we use the other equation:
      //ppw->pvecmetric[ppw->index_mt_hv_prime] = -2./k/ (1.-2.*pba->K/k2) * 3. * ppw->vector_source_v;

    }
  }

  /** - for tensor modes */

  if (_tensors_) {

    /* single einstein equation for tensor perturbations */
    ppw->pvecmetric[ppw->index_mt_gw_prime_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_gwdot]-(k2+2.*pba->K)*y[ppw->pv->index_pt_gw]+ppw->gw_source;

  }

  return _SUCCESS_;
}

int perturb_total_stress_energy(
                                struct precision * ppr,
                                struct background * pba,
                                struct thermo * pth,
                                struct perturbs * ppt,
                                int index_md,
                                double k,
                                double * y,
                                struct perturb_workspace * ppw
                                ) {

  /** Summary: */

  /** - define local variables */

  double a,a2;
  double delta_g=0.;
  double theta_g=0.;
  double shear_g=0.;
  double delta_ur=0.;
  double theta_ur=0.;
  double shear_ur=0.;
  double rho_delta_ncdm=0.;
  double rho_plus_p_theta_ncdm=0.;
  double rho_plus_p_shear_ncdm=0.;
  double delta_p_ncdm=0.;
  double factor;
  double rho_plus_p_ncdm;
  int index_q,n_ncdm,idx;
  double epsilon,q,q2,cg2_ncdm,w_ncdm,rho_ncdm_bg,p_ncdm_bg,pseudo_p_ncdm;
  double rho_m,delta_rho_m,rho_plus_p_m,rho_plus_p_theta_m;
  double w;
  double gwncdm;
  double rho_relativistic;
  double rho_dr_over_f;
  double delta_rho_scf, delta_p_scf, psi;

  /** - wavenumber and scale factor related quantities */

  a = ppw->pvecback[pba->index_bg_a];
  a2 = a * a;

  /** - for scalar modes */

  if (_scalars_) {

    /** - --> (a) deal with approximation schemes */

    /** - ---> (a.1.) photons */

    if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {

      if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {

        /** - ----> (a.1.1.) no approximation */

        delta_g = y[ppw->pv->index_pt_delta_g];
        theta_g = y[ppw->pv->index_pt_theta_g];
        shear_g = y[ppw->pv->index_pt_shear_g];

      }
      else {

        /** - ----> (a.1.2.) radiation streaming approximation */

        delta_g = 0.; /* actual free streaming approximation imposed after evaluation of einstein equations */
        theta_g = 0.; /* actual free streaming approximation imposed after evaluation of einstein equations */
        shear_g = 0.; /* shear always neglected in radiation streaming approximation */
      }
    }
    else {

      /** - ----> (a.1.3.)
tight coupling approximation */ delta_g = y[ppw->pv->index_pt_delta_g]; theta_g = y[ppw->pv->index_pt_theta_g]; /* first-order tight-coupling approximation for photon shear */ if (ppt->gauge == newtonian) { shear_g = 16./45./ppw->pvecthermo[pth->index_th_dkappa]*y[ppw->pv->index_pt_theta_g]; } else { shear_g = 0.; /* in the synchronous gauge, the expression of shear_g (at first-order in a tight-coupling expansion) is a function of h' and eta'; but h' and eta' are calculated in perturb_einstein() as a function of delta_g and theta_g. Hence, we set shear_g temporarily to zero, and set it to the right first-order value in perturb_einstein(), just before using the Einstein equation for the shear. */ } } /** - ---> (a.2.) ur */ if (pba->has_ur == _TRUE_) { if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { delta_ur = y[ppw->pv->index_pt_delta_ur]; theta_ur = y[ppw->pv->index_pt_theta_ur]; shear_ur = y[ppw->pv->index_pt_shear_ur]; } else { delta_ur = 0.; /* actual free streaming approximation imposed after evaluation of 1st einstein equation */ theta_ur = 0.; /* actual free streaming approximation imposed after evaluation of 1st einstein equation */ shear_ur = 0.; /* shear always neglected in free streaming approximation */ } } /** - --> (b) compute the total density, velocity and shear perturbations */ /* photon and baryon contribution */ ppw->delta_rho = ppw->pvecback[pba->index_bg_rho_g]*delta_g + ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_delta_b]; ppw->rho_plus_p_theta = 4./3.*ppw->pvecback[pba->index_bg_rho_g]*theta_g + ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_theta_b]; ppw->rho_plus_p_shear = 4./3.*ppw->pvecback[pba->index_bg_rho_g]*shear_g; ppw->delta_p = 1./3.*ppw->pvecback[pba->index_bg_rho_g]*delta_g + ppw->pvecthermo[pth->index_th_cb2]*ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_delta_b]; /* cdm contribution */ if (pba->has_cdm == _TRUE_) { ppw->delta_rho = ppw->delta_rho + 
ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_delta_cdm]; if (ppt->gauge == newtonian) ppw->rho_plus_p_theta = ppw->rho_plus_p_theta + ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_theta_cdm]; } /* dcdm contribution */ if (pba->has_dcdm == _TRUE_) { ppw->delta_rho += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_delta_dcdm]; ppw->rho_plus_p_theta += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_theta_dcdm]; } /* fluid contribution */ if (pba->has_fld == _TRUE_) { w = pba->w0_fld + pba->wa_fld * (1. - a / pba->a_today); ppw->delta_rho += ppw->pvecback[pba->index_bg_rho_fld]*y[ppw->pv->index_pt_delta_fld]; ppw->rho_plus_p_theta += (1.+w)*ppw->pvecback[pba->index_bg_rho_fld]*y[ppw->pv->index_pt_theta_fld]; ppw->delta_p = ppw->delta_p + pba->cs2_fld * ppw->pvecback[pba->index_bg_rho_fld]*y[ppw->pv->index_pt_delta_fld]; } /* ultra-relativistic decay radiation */ if (pba->has_dr == _TRUE_) { /* We have delta_rho_dr = rho_dr * F0_dr / f, where F follows the convention in astro-ph/9907388 and f is defined as f = rho_dr*a^4/rho_crit_today. In CLASS density units rho_crit_today = H0^2. 
*/ rho_dr_over_f = pow(pba->H0/a2,2); ppw->delta_rho += rho_dr_over_f*y[ppw->pv->index_pt_F0_dr]; ppw->rho_plus_p_theta += 4./3.*3./4*k*rho_dr_over_f*y[ppw->pv->index_pt_F0_dr+1]; ppw->rho_plus_p_shear += 2./3.*rho_dr_over_f*y[ppw->pv->index_pt_F0_dr+2]; ppw->delta_p += 1./3.*rho_dr_over_f*y[ppw->pv->index_pt_F0_dr]; } /* ultra-relativistic neutrino/relics contribution */ if (pba->has_ur == _TRUE_) { ppw->delta_rho = ppw->delta_rho + ppw->pvecback[pba->index_bg_rho_ur]*delta_ur; ppw->rho_plus_p_theta = ppw->rho_plus_p_theta + 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*theta_ur; ppw->rho_plus_p_shear = ppw->rho_plus_p_shear + 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*shear_ur; ppw->delta_p += 1./3.*ppw->pvecback[pba->index_bg_rho_ur]*delta_ur; } /* non-cold dark matter contribution */ if (pba->has_ncdm == _TRUE_) { idx = ppw->pv->index_pt_psi0_ncdm1; if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on){ // The perturbations are evolved integrated: for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ rho_ncdm_bg = ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; p_ncdm_bg = ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]; pseudo_p_ncdm = ppw->pvecback[pba->index_bg_pseudo_p_ncdm1+n_ncdm]; rho_plus_p_ncdm = rho_ncdm_bg + p_ncdm_bg; w_ncdm = p_ncdm_bg/rho_ncdm_bg; cg2_ncdm = w_ncdm*(1.0-1.0/(3.0+3.0*w_ncdm)*(3.0*w_ncdm-2.0+pseudo_p_ncdm/p_ncdm_bg)); if ((ppt->has_source_delta_ncdm == _TRUE_) || (ppt->has_source_theta_ncdm == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) { ppw->delta_ncdm[n_ncdm] = y[idx]; ppw->theta_ncdm[n_ncdm] = y[idx+1]; ppw->shear_ncdm[n_ncdm] = y[idx+2]; } ppw->delta_rho += rho_ncdm_bg*y[idx]; ppw->rho_plus_p_theta += rho_plus_p_ncdm*y[idx+1]; ppw->rho_plus_p_shear += rho_plus_p_ncdm*y[idx+2]; ppw->delta_p += cg2_ncdm*rho_ncdm_bg*y[idx]; idx += ppw->pv->l_max_ncdm[n_ncdm]+1; } } else{ // We must integrate to find perturbations: for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ rho_delta_ncdm = 0.0; rho_plus_p_theta_ncdm = 0.0; rho_plus_p_shear_ncdm = 0.0; 
delta_p_ncdm = 0.0; factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4); for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) { q = pba->q_ncdm[n_ncdm][index_q]; q2 = q*q; epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2); rho_delta_ncdm += q2*epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx]; rho_plus_p_theta_ncdm += q2*q*pba->w_ncdm[n_ncdm][index_q]*y[idx+1]; rho_plus_p_shear_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx+2]; delta_p_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx]; //Jump to next momentum bin: idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1); } rho_delta_ncdm *= factor; rho_plus_p_theta_ncdm *= k*factor; rho_plus_p_shear_ncdm *= 2.0/3.0*factor; delta_p_ncdm *= factor/3.; if ((ppt->has_source_delta_ncdm == _TRUE_) || (ppt->has_source_theta_ncdm == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) { ppw->delta_ncdm[n_ncdm] = rho_delta_ncdm/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; ppw->theta_ncdm[n_ncdm] = rho_plus_p_theta_ncdm/ (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]); ppw->shear_ncdm[n_ncdm] = rho_plus_p_shear_ncdm/ (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]); } ppw->delta_rho += rho_delta_ncdm; ppw->rho_plus_p_theta += rho_plus_p_theta_ncdm; ppw->rho_plus_p_shear += rho_plus_p_shear_ncdm; ppw->delta_p += delta_p_ncdm; } } } /* scalar field contribution. In Newtonian gauge, delta_scf depends on the metric perturbation psi which is inferred from rho_plus_p_shear. So the contribution from the scalar field must be below all species with non-zero shear. 
*/ if (pba->has_scf == _TRUE_) { if (ppt->gauge == synchronous){ delta_rho_scf = 1./3.* (1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf] + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]); delta_p_scf = 1./3.* (1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf] - ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]); } else{ /* equation for psi */ psi = y[ppw->pv->index_pt_phi] - 4.5 * (a2/k/k) * ppw->rho_plus_p_shear; delta_rho_scf = 1./3.* (1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf] + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf] - 1./a2*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*psi); delta_p_scf = 1./3.* (1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf] - ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf] - 1./a2*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*psi); } ppw->delta_rho += delta_rho_scf; ppw->rho_plus_p_theta += 1./3.* k*k/a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_scf]; ppw->delta_p += delta_p_scf; } /* store delta_m in the current gauge. In perturb_einstein, this will be transformed later on into the gauge-independent variable D = delta_m - 2H'/H \theta_m/k^2 . 
*/ if (ppt->has_source_delta_m == _TRUE_) { /* include baryons and cold dark matter */ delta_rho_m = ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_delta_b]; rho_m = ppw->pvecback[pba->index_bg_rho_b]; if (pba->has_cdm == _TRUE_) { delta_rho_m += ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_delta_cdm]; rho_m += ppw->pvecback[pba->index_bg_rho_cdm]; } /* include decaying cold dark matter */ if (pba->has_dcdm == _TRUE_) { delta_rho_m += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_delta_dcdm]; rho_m += ppw->pvecback[pba->index_bg_rho_dcdm]; } /* include any other species non-relativistic today (like ncdm species) */ if (pba->has_ncdm == _TRUE_) { for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ delta_rho_m += ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]*ppw->delta_ncdm[n_ncdm]; rho_m += ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; } } /* infer delta_m */ ppw->delta_m = delta_rho_m/rho_m; } /* store theta_m in the current gauge. In perturb_einstein, this will be transformed later on into the gauge-independent variable Theta . Note that computing theta_m is necessary also if we want the delta_m source only, because the gauge-invariant delta_m involves theta_m in the current gauge. 
*/ if ((ppt->has_source_delta_m == _TRUE_) || (ppt->has_source_theta_m == _TRUE_)) { /* include baryons and cold dark matter */ rho_plus_p_theta_m = ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_theta_b]; rho_plus_p_m = ppw->pvecback[pba->index_bg_rho_b]; if (pba->has_cdm == _TRUE_) { if (ppt->gauge == newtonian) rho_plus_p_theta_m += ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_theta_cdm]; rho_plus_p_m += ppw->pvecback[pba->index_bg_rho_cdm]; } if (pba->has_dcdm == _TRUE_) { rho_plus_p_theta_m += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_theta_dcdm]; rho_plus_p_m += ppw->pvecback[pba->index_bg_rho_dcdm]; } /* include any other species non-relativistic today (like ncdm species) */ if (pba->has_ncdm == _TRUE_) { for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ rho_plus_p_theta_m += (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm])*ppw->theta_ncdm[n_ncdm]; rho_plus_p_m += (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]); } } /* infer theta_m */ ppw->theta_m = rho_plus_p_theta_m/rho_plus_p_m; } } /** - for vector modes */ if (_vectors_) { ppw->vector_source_pi = 0.; ppw->vector_source_v = 0.; /** - --> photon contribution to vector sources: */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */ ppw->vector_source_v += 4./3.*a2*ppw->pvecback[pba->index_bg_rho_g] * (-1./4.*_SQRT2_) * (y[ppw->pv->index_pt_delta_g]+2.*y[ppw->pv->index_pt_delta_g]+y[ppw->pv->index_pt_shear_g]); ppw->vector_source_pi += 1./3.*a2*ppw->pvecback[pba->index_bg_rho_g] * (6.*_SQRT2_/5./sqrt(1.-2.*pba->K/k/k)) * (4./3./k*y[ppw->pv->index_pt_theta_g]+y[ppw->pv->index_pt_l3_g]); } } /** - --> baryons */ } /** - for tensor modes */ if (_tensors_) { ppw->gw_source = 0.0; /** - --> photon contribution to gravitational wave source: */ if 
(ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */ ppw->gw_source += (-_SQRT6_*4*a2*ppw->pvecback[pba->index_bg_rho_g]* (1./15.*y[ppw->pv->index_pt_delta_g]+ 4./21.*y[ppw->pv->index_pt_shear_g]+ 1./35.*y[ppw->pv->index_pt_l3_g+1])); } } /** - --> ur contribution to gravitational wave source: */ if (ppt->evolve_tensor_ur == _TRUE_){ rho_relativistic = 0.; if (ppt->tensor_method == tm_exact) rho_relativistic += ppw->pvecback[pba->index_bg_rho_ur]; if (ppt->tensor_method == tm_massless_approximation) { if (pba->has_ur == _TRUE_) rho_relativistic += ppw->pvecback[pba->index_bg_rho_ur]; if (pba->has_ncdm == _TRUE_) { for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++) { /* (3 p_ncdm1) is the "relativistic" contribution to rho_ncdm1 */ rho_relativistic += 3.*ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]; } } } ppw->gw_source += (-_SQRT6_*4*a2*rho_relativistic* (1./15.*y[ppw->pv->index_pt_delta_ur]+ 4./21.*y[ppw->pv->index_pt_shear_ur]+ 1./35.*y[ppw->pv->index_pt_l3_ur+1])); } /** - --> ncdm contribution to gravitational wave source: */ if (ppt->evolve_tensor_ncdm == _TRUE_){ idx = ppw->pv->index_pt_psi0_ncdm1; // We must integrate to find perturbations: for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ gwncdm = 0.; factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4); for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) { q = pba->q_ncdm[n_ncdm][index_q]; q2 = q*q; epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2); gwncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*(1./15.*y[idx]+2./21.*y[idx+2]+1./35.*y[idx+4]); //Jump to next momentum bin: idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1); } gwncdm *= -_SQRT6_*4*a2*factor; ppw->gw_source += gwncdm; } } } return _SUCCESS_; } /** * Compute the source functions (three terms for temperature, one for * E or B modes, etc.) 
*
 * This is one of the few functions in the code which is passed to
 * the generic_integrator() routine. Since generic_integrator()
 * should work with functions passed from various modules, the format
 * of the arguments is a bit special:
 *
 * - fixed parameters and workspaces are passed through a generic
 * pointer. generic_integrator() doesn't know the content of this
 * pointer.
 *
 * - the error management is a bit special: errors are not written as
 * usual to pth->error_message, but to a generic error_message passed
 * in the list of arguments.
 *
 * @param tau                      Input: conformal time
 * @param y                        Input: vector of perturbations
 * @param dy                       Input: vector of time derivative of perturbations
 * @param index_tau                Input: index in the array tau_sampling
 * @param parameters_and_workspace Input/Output: in input, all parameters needed by perturb_derivs, in output, source terms
 * @param error_message            Output: error message
 * @return the error status
 */

int perturb_sources(
                    double tau,
                    double * y,
                    double * dy,
                    int index_tau,
                    void * parameters_and_workspace,
                    ErrorMsg error_message
                    ) {
  /** Summary: */

  /** - define local variables */

  double P;
  int index_type;

  struct perturb_parameters_and_workspace * pppaw;
  struct precision * ppr;
  struct background * pba;
  struct thermo * pth;
  struct perturbs * ppt;
  int index_md;
  int index_ic;
  int index_k;
  double k;
  double z;
  struct perturb_workspace * ppw;
  double * pvecback;
  double * pvecthermo;
  double * pvecmetric;

  double delta_g, delta_rho_scf, rho_plus_p_theta_scf;
  double a_prime_over_a=0.;  /* (a'/a) */
  double a_prime_over_a_prime=0.;  /* (a'/a)' */
  int switch_isw = 1; /* ISW term included by default; zeroed below when disabled for this redshift range */

  double a_rel, a2_rel, f_dr;

  /** - rename structure fields (just to avoid heavy notations) */

  pppaw = parameters_and_workspace;
  ppr = pppaw->ppr;
  pba = pppaw->pba;
  pth = pppaw->pth;
  ppt = pppaw->ppt;
  index_md = pppaw->index_md;
  index_ic = pppaw->index_ic;
  index_k = pppaw->index_k;
  k = pppaw->k;
  ppw = pppaw->ppw;

  pvecback = ppw->pvecback;
  pvecthermo = ppw->pvecthermo;
  pvecmetric = ppw->pvecmetric;

  /** - get background/thermo quantities in this point */

  class_call(background_at_tau(pba,
                               tau,
                               pba->normal_info,
                               pba->inter_closeby,
                               &(ppw->last_index_back),
                               pvecback),
             pba->error_message,
             error_message);

  z = pba->a_today/pvecback[pba->index_bg_a]-1.;

  class_call(thermodynamics_at_z(pba,
                                 pth,
                                 z,  /* redshift z=1/a-1 */
                                 pth->inter_closeby,
                                 &(ppw->last_index_thermo),
                                 pvecback,
                                 pvecthermo),
             pth->error_message,
             error_message);

  a_rel = ppw->pvecback[pba->index_bg_a]/pba->a_today;
  a2_rel = a_rel * a_rel;

  /* derived background quantities, useful only in synchronous gauge */
  if (ppt->gauge == synchronous) {

    a_prime_over_a = pvecback[pba->index_bg_a] * pvecback[pba->index_bg_H]; /* (a'/a)=aH */
    a_prime_over_a_prime = pvecback[pba->index_bg_H_prime] * pvecback[pba->index_bg_a]
      + pow(pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a],2); /* (a'/a)' = aH'+(aH)^2 */
  }

  /** - for scalars */
  if (_scalars_) {

    /** - --> compute metric perturbations */

    class_call(perturb_einstein(ppr,
                                pba,
                                pth,
                                ppt,
                                index_md,
                                k,
                                tau,
                                y,
                                ppw),
               ppt->error_message,
               error_message);

    /** - --> compute quantities depending on approximation schemes */

    if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {
      delta_g = ppw->rsa_delta_g;
      P = 0.;
    }
    else {
      delta_g = y[ppw->pv->index_pt_delta_g];

      if (ppw->approx[ppw->index_ap_tca] == (int)tca_on)
        P = 5.* ppw->s_l[2] * ppw->tca_shear_g/8.;  /* (2.5+0.5+2)shear_g/8 */
      else
        P = (y[ppw->pv->index_pt_pol0_g] + y[ppw->pv->index_pt_pol2_g] + 2.* ppw->s_l[2] *y[ppw->pv->index_pt_shear_g])/8.;
    }

    /** - --> for each type, compute source terms */

    /* scalar temperature */
    if (ppt->has_source_t == _TRUE_) {

      /* check whether integrated Sachs-Wolfe term should be included:
         early and late ISW pieces can be switched off individually,
         split at redshift ppt->eisw_lisw_split_z */

      if ((ppt->switch_eisw == 0) && (z >= ppt->eisw_lisw_split_z)){
        switch_isw = 0;
      }
      if ((ppt->switch_lisw == 0) && (z < ppt->eisw_lisw_split_z)) {
        switch_isw=0;
      }

      /* newtonian gauge: simplest form, not efficient numerically */
      /*
      if (ppt->gauge == newtonian) {
        _set_source_(ppt->index_tp_t0) =
          pvecthermo[pth->index_th_exp_m_kappa] * pvecmetric[ppw->index_mt_phi_prime]
          + pvecthermo[pth->index_th_g] * delta_g / 4.;
        _set_source_(ppt->index_tp_t1) =
          pvecthermo[pth->index_th_exp_m_kappa] * k* pvecmetric[ppw->index_mt_psi]
          + pvecthermo[pth->index_th_g] * y[ppw->pv->index_pt_theta_b]/k;
        _set_source_(ppt->index_tp_t2) =
          pvecthermo[pth->index_th_g] * P;
      }
      */

      /* newtonian gauge: slightly more complicated form, but more efficient numerically */

      if (ppt->gauge == newtonian) {

        _set_source_(ppt->index_tp_t0) =
          ppt->switch_sw * pvecthermo[pth->index_th_g] * (delta_g / 4. + pvecmetric[ppw->index_mt_psi])
          + switch_isw * (pvecthermo[pth->index_th_g] * (y[ppw->pv->index_pt_phi]-pvecmetric[ppw->index_mt_psi])
                          + pvecthermo[pth->index_th_exp_m_kappa] * 2. * pvecmetric[ppw->index_mt_phi_prime])
          + ppt->switch_dop /k/k * (pvecthermo[pth->index_th_g] * dy[ppw->pv->index_pt_theta_b]
                                    + pvecthermo[pth->index_th_dg] * y[ppw->pv->index_pt_theta_b]);

        _set_source_(ppt->index_tp_t1) =
          switch_isw * pvecthermo[pth->index_th_exp_m_kappa] * k* (pvecmetric[ppw->index_mt_psi]-y[ppw->pv->index_pt_phi]);

        _set_source_(ppt->index_tp_t2) =
          ppt->switch_pol * pvecthermo[pth->index_th_g] * P;
      }

      /* synchronous gauge: simplest form, not efficient numerically */
      /*
      if (ppt->gauge == synchronous) {
        _set_source_(ppt->index_tp_t0) =
          - pvecthermo[pth->index_th_exp_m_kappa] * pvecmetric[ppw->index_mt_h_prime] / 6.
          + pvecthermo[pth->index_th_g] / 4. * delta_g;
        _set_source_(ppt->index_tp_t1) =
          pvecthermo[pth->index_th_g] * y[ppw->pv->index_pt_theta_b] / k;
        _set_source_(ppt->index_tp_t2) =
          pvecthermo[pth->index_th_exp_m_kappa] * k*k* 2./3. * ppw->s_l[2] * pvecmetric[ppw->index_mt_alpha]
          + pvecthermo[pth->index_th_g] * P;
      }
      */

      /* synchronous gauge: slightly more complicated form, but more efficient numerically */

      if (ppt->gauge == synchronous) {

        _set_source_(ppt->index_tp_t0) =
          ppt->switch_sw * pvecthermo[pth->index_th_g] * (delta_g/4. + pvecmetric[ppw->index_mt_alpha_prime])
          + switch_isw * (pvecthermo[pth->index_th_g] * (y[ppw->pv->index_pt_eta]
                                                         - pvecmetric[ppw->index_mt_alpha_prime]
                                                         - 2 * a_prime_over_a * pvecmetric[ppw->index_mt_alpha])
                          + pvecthermo[pth->index_th_exp_m_kappa] * 2. * (pvecmetric[ppw->index_mt_eta_prime]
                                                                          - a_prime_over_a_prime * pvecmetric[ppw->index_mt_alpha]
                                                                          - a_prime_over_a * pvecmetric[ppw->index_mt_alpha_prime]))
          + ppt->switch_dop * (pvecthermo[pth->index_th_g] * (dy[ppw->pv->index_pt_theta_b]/k/k + pvecmetric[ppw->index_mt_alpha_prime])
                               +pvecthermo[pth->index_th_dg] * (y[ppw->pv->index_pt_theta_b]/k/k + pvecmetric[ppw->index_mt_alpha]));

        _set_source_(ppt->index_tp_t1) =
          switch_isw * pvecthermo[pth->index_th_exp_m_kappa] * k * (pvecmetric[ppw->index_mt_alpha_prime]
                                                                    + 2. * a_prime_over_a * pvecmetric[ppw->index_mt_alpha]
                                                                    - y[ppw->pv->index_pt_eta]);

        _set_source_(ppt->index_tp_t2) =
          ppt->switch_pol * pvecthermo[pth->index_th_g] * P;
      }
    }

    /* scalar polarization */
    if (ppt->has_source_p == _TRUE_) {

      /* all gauges. Note that the correct formula for the E source
         should have a minus sign, as shown in Hu & White. We put a plus
         sign to comply with the 'historical convention' established in
         CMBFAST and CAMB. */
      _set_source_(ppt->index_tp_p) = sqrt(6.) * pvecthermo[pth->index_th_g] * P;
    }

    /* now, non-CMB sources */

    /* Bardeen potential -PHI_H = phi in Newtonian gauge */
    if (ppt->has_source_phi == _TRUE_) {

      if (ppt->gauge == newtonian)
        _set_source_(ppt->index_tp_phi) = y[ppw->pv->index_pt_phi];

      if (ppt->gauge == synchronous)
        _set_source_(ppt->index_tp_phi) = y[ppw->pv->index_pt_eta] - a_prime_over_a * pvecmetric[ppw->index_mt_alpha];
    }

    /* its derivative phi' */
    if (ppt->has_source_phi_prime == _TRUE_) {

      if (ppt->gauge == newtonian)
        _set_source_(ppt->index_tp_phi_prime) = dy[ppw->pv->index_pt_phi];

      if (ppt->gauge == synchronous)
        _set_source_(ppt->index_tp_phi_prime) = dy[ppw->pv->index_pt_eta]
          - a_prime_over_a_prime * pvecmetric[ppw->index_mt_alpha]
          - a_prime_over_a * pvecmetric[ppw->index_mt_alpha_prime];
    }

    /* diff of Bardeen potentials PHI_A-PHI_H = psi + phi in newtonian gauge */
    if (ppt->has_source_phi_plus_psi == _TRUE_) {

      if (ppt->gauge == newtonian)
        _set_source_(ppt->index_tp_phi_plus_psi) = y[ppw->pv->index_pt_phi] + pvecmetric[ppw->index_mt_psi];

      if (ppt->gauge == synchronous)
        _set_source_(ppt->index_tp_phi_plus_psi) = y[ppw->pv->index_pt_eta] + pvecmetric[ppw->index_mt_alpha_prime];
    }

    /* Bardeen potential PHI_A = psi in newtonian gauge */
    if (ppt->has_source_psi == _TRUE_) {

      if (ppt->gauge == newtonian)
        _set_source_(ppt->index_tp_psi) = pvecmetric[ppw->index_mt_psi];

      if (ppt->gauge == synchronous)
        _set_source_(ppt->index_tp_psi) = a_prime_over_a * pvecmetric[ppw->index_mt_alpha] + pvecmetric[ppw->index_mt_alpha_prime];
    }

    /* total matter over density (gauge-invariant, defined as in arXiv:1307.1459) */
    if (ppt->has_source_delta_m == _TRUE_) {
      _set_source_(ppt->index_tp_delta_m) = ppw->delta_m;
    }

    /* delta_g */
    if (ppt->has_source_delta_g == _TRUE_) {
      _set_source_(ppt->index_tp_delta_g) = delta_g;
    }

    /* delta_baryon */
    if (ppt->has_source_delta_b == _TRUE_) {
      _set_source_(ppt->index_tp_delta_b) = y[ppw->pv->index_pt_delta_b];
    }

    /* delta_cdm */
    if (ppt->has_source_delta_cdm == _TRUE_) {
      _set_source_(ppt->index_tp_delta_cdm) = y[ppw->pv->index_pt_delta_cdm];
    }

    /* delta_dcdm */
    if (ppt->has_source_delta_dcdm == _TRUE_) {
      _set_source_(ppt->index_tp_delta_dcdm) = y[ppw->pv->index_pt_delta_dcdm];
    }

    /* delta_fld */
    if (ppt->has_source_delta_fld == _TRUE_) {
      _set_source_(ppt->index_tp_delta_fld) = y[ppw->pv->index_pt_delta_fld];
    }

    /* delta_scf */
    if (ppt->has_source_delta_scf == _TRUE_) {
      if (ppt->gauge == synchronous){
        delta_rho_scf = 1./3.*
          (1./a2_rel*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
           + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]);
      }
      else{
        delta_rho_scf = 1./3.*
          (1./a2_rel*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
           + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]
           - 1./a2_rel*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*ppw->pvecmetric[ppw->index_mt_psi]);
      }
      _set_source_(ppt->index_tp_delta_scf) = delta_rho_scf/pvecback[pba->index_bg_rho_scf];
    }

    /* delta_dr */
    if (ppt->has_source_delta_dr == _TRUE_) {
      /* f = rho_dr*a^4/H0^2 (see the dr block in perturb_total_stress_energy) */
      f_dr = pow(a2_rel/pba->H0,2)*pvecback[pba->index_bg_rho_dr];
      _set_source_(ppt->index_tp_delta_dr) = y[ppw->pv->index_pt_F0_dr]/f_dr;
    }

    /* delta_ur */
    if (ppt->has_source_delta_ur == _TRUE_) {
      if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off)
        _set_source_(ppt->index_tp_delta_ur) = y[ppw->pv->index_pt_delta_ur];
      else
        _set_source_(ppt->index_tp_delta_ur) = ppw->rsa_delta_ur;
    }

    /* delta_ncdm1 */
    if (ppt->has_source_delta_ncdm == _TRUE_) {
      for (index_type = ppt->index_tp_delta_ncdm1; index_type < ppt->index_tp_delta_ncdm1+pba->N_ncdm; index_type++) {
        _set_source_(index_type) = ppw->delta_ncdm[index_type - ppt->index_tp_delta_ncdm1];
      }
    }

    /* total velocity (gauge-invariant, defined as in arXiv:1307.1459) */
    if (ppt->has_source_theta_m == _TRUE_) {
      _set_source_(ppt->index_tp_theta_m) = ppw->theta_m;
    }

    /* theta_g */
    if (ppt->has_source_theta_g == _TRUE_) {
      if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off)
        _set_source_(ppt->index_tp_theta_g) = y[ppw->pv->index_pt_theta_g];
      else
        _set_source_(ppt->index_tp_theta_g) = ppw->rsa_theta_g;
    }

    /* theta_baryon */
    if (ppt->has_source_theta_b == _TRUE_) {
      _set_source_(ppt->index_tp_theta_b) = y[ppw->pv->index_pt_theta_b];
    }

    /* theta_cdm */
    if (ppt->has_source_theta_cdm == _TRUE_) {
      _set_source_(ppt->index_tp_theta_cdm) = y[ppw->pv->index_pt_theta_cdm];
    }

    /* theta_dcdm */
    if (ppt->has_source_theta_dcdm == _TRUE_) {
      _set_source_(ppt->index_tp_theta_dcdm) = y[ppw->pv->index_pt_theta_dcdm];
    }

    /* theta_fld */
    if (ppt->has_source_theta_fld == _TRUE_) {
      _set_source_(ppt->index_tp_theta_fld) = y[ppw->pv->index_pt_theta_fld];
    }

    /* theta_scf */
    if (ppt->has_source_theta_scf == _TRUE_) {

      rho_plus_p_theta_scf = 1./3.*
        k*k/a2_rel*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_scf];

      _set_source_(ppt->index_tp_theta_scf) = rho_plus_p_theta_scf/
        (pvecback[pba->index_bg_rho_scf]+pvecback[pba->index_bg_p_scf]);
    }

    /* theta_dr */
    if (ppt->has_source_theta_dr == _TRUE_) {

      f_dr = pow(a2_rel/pba->H0,2)*pvecback[pba->index_bg_rho_dr];

      _set_source_(ppt->index_tp_theta_dr) = 3./4.*k*y[ppw->pv->index_pt_F0_dr+1]/f_dr;
    }

    /* theta_ur */
    if (ppt->has_source_theta_ur == _TRUE_) {
      if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off)
        _set_source_(ppt->index_tp_theta_ur) = y[ppw->pv->index_pt_theta_ur];
      else
        _set_source_(ppt->index_tp_theta_ur) = ppw->rsa_theta_ur;
    }

    /* theta_ncdm1 */
    if (ppt->has_source_theta_ncdm == _TRUE_) {
      for (index_type = ppt->index_tp_theta_ncdm1; index_type < ppt->index_tp_theta_ncdm1+pba->N_ncdm; index_type++) {
        _set_source_(index_type) = ppw->theta_ncdm[index_type - ppt->index_tp_theta_ncdm1];
      }
    }
  }

  /** - for tensors */
  if (_tensors_) {

    /** - --> compute quantities depending on approximation schemes */

    if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {

      if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {

        P = -(1./10.*y[ppw->pv->index_pt_delta_g]
              +2./7.*y[ppw->pv->index_pt_shear_g]
              +3./70.*y[ppw->pv->index_pt_delta_g+4]
              -3./5.*y[ppw->pv->index_pt_pol0_g]
              +6./7.*y[ppw->pv->index_pt_pol2_g]
              -3./70.*y[ppw->pv->index_pt_pol0_g+4])
          /sqrt(6.);
      }
      else {
        P = 2./5.*_SQRT6_*y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; //TBC
      }
    }
    else {
      P = 0.;
    }

    /* tensor temperature */
    if (ppt->has_source_t == _TRUE_) {

      _set_source_(ppt->index_tp_t2) =
        - y[ppw->pv->index_pt_gwdot] * pvecthermo[pth->index_th_exp_m_kappa]
        + pvecthermo[pth->index_th_g] * P;
    }

    /* tensor polarization */
    if (ppt->has_source_p == _TRUE_) {

      /* Note that the correct formula for the polarization source
         should have a minus sign, as shown in Hu & White. We put a plus
         sign to comply with the 'historical convention' established in
         CMBFAST and CAMB. */
      _set_source_(ppt->index_tp_p) = sqrt(6.) * pvecthermo[pth->index_th_g] * P;
    }
  }

  return _SUCCESS_;

}

/**
 * When testing the code or a cosmological model, it can be useful to
 * output perturbations at each step of integration (and not just the
 * delta's at each source sampling point, which is achieved simply by
 * asking for matter transfer functions). Then this function can be
 * passed to the generic_evolver routine.
 *
 * By default, instead of passing this function to generic_evolver,
 * one passes a null pointer. Then this function is just not used.
 *
 * @param tau                      Input: conformal time
 * @param y                        Input: vector of perturbations
 * @param dy                       Input: vector of its derivatives (already allocated)
 * @param parameters_and_workspace Input: fixed parameters (e.g.
indices) * @param error_message Output: error message * */ int perturb_print_variables(double tau, double * y, double * dy, void * parameters_and_workspace, ErrorMsg error_message ) { struct perturb_parameters_and_workspace * pppaw; /** Summary: */ /** - define local variables */ double k; int index_md; //struct precision * ppr; struct background * pba; struct thermo * pth; struct perturbs * ppt; struct perturb_workspace * ppw; double * pvecback; double * pvecmetric; double delta_g,theta_g,shear_g,l4_g,pol0_g,pol1_g,pol2_g,pol4_g; double delta_b,theta_b; double delta_cdm=0.,theta_cdm=0.; double delta_dcdm=0.,theta_dcdm=0.; double delta_dr=0.,theta_dr=0.,shear_dr=0., f_dr=1.0; double delta_ur=0.,theta_ur=0.,shear_ur=0.,l4_ur=0.; double delta_rho_scf=0., rho_plus_p_theta_scf=0.; double delta_scf=0., theta_scf=0.; /** - ncdm sector begins */ int n_ncdm; double *delta_ncdm=NULL, *theta_ncdm=NULL, *shear_ncdm=NULL, *delta_p_over_delta_rho_ncdm=NULL; double rho_ncdm_bg, p_ncdm_bg, pseudo_p_ncdm, w_ncdm; double rho_delta_ncdm = 0.0; double rho_plus_p_theta_ncdm = 0.0; double rho_plus_p_shear_ncdm = 0.0; double delta_p_ncdm = 0.0; double factor = 0.0; double q,q2,epsilon; /** - ncdm sector ends */ double phi=0.,psi=0.,alpha=0.; double delta_temp=0., delta_chi=0.; double a,a2,H; int idx,index_q, storeidx; double *dataptr; /** - rename structure fields (just to avoid heavy notations) */ pppaw = parameters_and_workspace; k = pppaw->k; index_md = pppaw->index_md; //ppr = pppaw->ppr; pba = pppaw->pba; pth = pppaw->pth; ppt = pppaw->ppt; ppw = pppaw->ppw; pvecback = ppw->pvecback; pvecmetric = ppw->pvecmetric; a = pvecback[pba->index_bg_a]; a2 = a*a; H = pvecback[pba->index_bg_H]; if (pba->has_ncdm == _TRUE_){ class_alloc(delta_ncdm, sizeof(double)*pba->N_ncdm,error_message); class_alloc(theta_ncdm, sizeof(double)*pba->N_ncdm,error_message); class_alloc(shear_ncdm, sizeof(double)*pba->N_ncdm,error_message); class_alloc(delta_p_over_delta_rho_ncdm, 
sizeof(double)*pba->N_ncdm,error_message); } /** - calculate perturbed recombination */ if ((ppt->has_perturbed_recombination == _TRUE_) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off) ){ delta_temp = y[ppw->pv->index_pt_perturbed_recombination_delta_temp]; delta_chi =y[ppw->pv->index_pt_perturbed_recombination_delta_chi]; } /** - for scalar modes */ if (_scalars_) { if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) { delta_g = y[ppw->pv->index_pt_delta_g]; theta_g = y[ppw->pv->index_pt_theta_g]; } else { delta_g = ppw->rsa_delta_g; theta_g = ppw->rsa_theta_g; } if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) { if (ppw->approx[ppw->index_ap_tca]==(int)tca_on) { shear_g = ppw->tca_shear_g; //l3_g = 6./7.*k/ppw->pvecthermo[pth->index_th_dkappa]*ppw->tca_shear_g; pol0_g = 2.5*ppw->tca_shear_g; pol1_g = 7./12.*6./7.*k/ppw->pvecthermo[pth->index_th_dkappa]*ppw->tca_shear_g; pol2_g = 0.5*ppw->tca_shear_g; //pol3_g = 0.25*6./7.*k/ppw->pvecthermo[pth->index_th_dkappa]*ppw->tca_shear_g; } else { shear_g = y[ppw->pv->index_pt_shear_g]; //l3_g = y[ppw->pv->index_pt_l3_g]; pol0_g = y[ppw->pv->index_pt_pol0_g]; pol1_g = y[ppw->pv->index_pt_pol1_g]; pol2_g = y[ppw->pv->index_pt_pol2_g]; //pol3_g = y[ppw->pv->index_pt_pol3_g]; } } else { shear_g = 0; //l3_g = 0; pol0_g = 0; pol1_g = 0; pol2_g = 0; //pol3_g = 0.; } if (pba->has_ur == _TRUE_) { if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) { delta_ur = y[ppw->pv->index_pt_delta_ur]; theta_ur = y[ppw->pv->index_pt_theta_ur]; shear_ur = y[ppw->pv->index_pt_shear_ur]; } else { delta_ur = ppw->rsa_delta_ur; theta_ur = ppw->rsa_theta_ur; shear_ur = 0.; } } delta_b = y[ppw->pv->index_pt_delta_b]; theta_b = y[ppw->pv->index_pt_theta_b]; if (pba->has_cdm == _TRUE_) { delta_cdm = y[ppw->pv->index_pt_delta_cdm]; if (ppt->gauge == synchronous) { theta_cdm = 0.; } else { theta_cdm = y[ppw->pv->index_pt_theta_cdm]; } } /* gravitational potentials */ if (ppt->gauge == synchronous) { alpha = pvecmetric[ppw->index_mt_alpha]; psi = 
pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a] * alpha + pvecmetric[ppw->index_mt_alpha_prime]; phi = y[ppw->pv->index_pt_eta] - pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha; } else if (ppt->gauge == newtonian){ psi = pvecmetric[ppw->index_mt_psi]; phi = y[ppw->pv->index_pt_phi]; } else{ psi = 0.0; phi = 0.0; } if (pba->has_ncdm == _TRUE_) { /** - --> Get delta, deltaP/rho, theta, shear and store in array */ idx = ppw->pv->index_pt_psi0_ncdm1; if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on){ // The perturbations are evolved integrated: for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ rho_ncdm_bg = pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; p_ncdm_bg = pvecback[pba->index_bg_p_ncdm1+n_ncdm]; pseudo_p_ncdm = pvecback[pba->index_bg_pseudo_p_ncdm1+n_ncdm]; w_ncdm = p_ncdm_bg/rho_ncdm_bg; delta_ncdm[n_ncdm] = y[idx]; theta_ncdm[n_ncdm] = y[idx+1]; shear_ncdm[n_ncdm] = y[idx+2]; //This is the adiabatic sound speed: delta_p_over_delta_rho_ncdm[n_ncdm] = w_ncdm*(1.0-1.0/(3.0+3.0*w_ncdm)*(3.0*w_ncdm-2.0+pseudo_p_ncdm/p_ncdm_bg)); idx += ppw->pv->l_max_ncdm[n_ncdm]+1; } } else{ // We must integrate to find perturbations: for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ rho_delta_ncdm = 0.0; rho_plus_p_theta_ncdm = 0.0; rho_plus_p_shear_ncdm = 0.0; delta_p_ncdm = 0.0; factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4); for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) { q = pba->q_ncdm[n_ncdm][index_q]; q2 = q*q; epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2); rho_delta_ncdm += q2*epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx]; rho_plus_p_theta_ncdm += q2*q*pba->w_ncdm[n_ncdm][index_q]*y[idx+1]; rho_plus_p_shear_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx+2]; delta_p_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx]; //Jump to next momentum bin: idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1); } rho_delta_ncdm *= factor; rho_plus_p_theta_ncdm *= k*factor; rho_plus_p_shear_ncdm *= 2.0/3.0*factor; delta_p_ncdm *= 
factor/3.; delta_ncdm[n_ncdm] = rho_delta_ncdm/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; theta_ncdm[n_ncdm] = rho_plus_p_theta_ncdm/ (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]); shear_ncdm[n_ncdm] = rho_plus_p_shear_ncdm/ (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]); delta_p_over_delta_rho_ncdm[n_ncdm] = delta_p_ncdm/rho_delta_ncdm; if (delta_p_over_delta_rho_ncdm[n_ncdm] < -0.5){ FILE * fid = fopen("integrand.dat","w"); fclose(fid); } } } } if (pba->has_dcdm == _TRUE_) { delta_dcdm = y[ppw->pv->index_pt_delta_dcdm]; theta_dcdm = y[ppw->pv->index_pt_theta_dcdm]; } if (pba->has_dr == _TRUE_) { f_dr = pow(pvecback[pba->index_bg_a]*pvecback[pba->index_bg_a]/pba->H0,2)*pvecback[pba->index_bg_rho_dr]; delta_dr = y[ppw->pv->index_pt_F0_dr]/f_dr; theta_dr = y[ppw->pv->index_pt_F0_dr+1]*3./4.*k/f_dr; shear_dr = y[ppw->pv->index_pt_F0_dr+2]*0.5/f_dr; } if (pba->has_scf == _TRUE_){ if (ppt->gauge == synchronous){ delta_rho_scf = 1./3.* (1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf] + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]); } else{ delta_rho_scf = 1./3.* (1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf] + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf] - 1./a2*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*ppw->pvecmetric[ppw->index_mt_psi]); } rho_plus_p_theta_scf = 1./3.* k*k/a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_scf]; delta_scf = delta_rho_scf/pvecback[pba->index_bg_rho_scf]; theta_scf = rho_plus_p_theta_scf/(pvecback[pba->index_bg_rho_scf]+pvecback[pba->index_bg_p_scf]); } /* converting synchronous variables to newtonian ones */ if (ppt->gauge == synchronous) { /* density and velocity perturbations (comment out if you wish to keep synchronous variables) */ delta_g -= 4. 
* pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha; theta_g += k*k*alpha; delta_b -= 3. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha; theta_b += k*k*alpha; if (pba->has_ur == _TRUE_) { delta_ur -= 4. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha; theta_ur += k*k*alpha; } if (pba->has_dr == _TRUE_) { delta_dr += (-4.*a*H+a*pba->Gamma_dcdm*pvecback[pba->index_bg_rho_dcdm]/pvecback[pba->index_bg_rho_dr])*alpha; theta_dr += k*k*alpha; } if (pba->has_cdm == _TRUE_) { delta_cdm -= 3. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha; theta_cdm += k*k*alpha; } if (pba->has_ncdm == _TRUE_) { for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ /** - --> Do gauge transformation of delta, deltaP/rho (?) and theta using -= 3aH(1+w_ncdm) alpha for delta. */ } } if (pba->has_dcdm == _TRUE_) { delta_dcdm += alpha*(-a*pba->Gamma_dcdm-3.*a*H); theta_dcdm += k*k*alpha; } if (pba->has_scf == _TRUE_) { delta_scf += alpha*(-3.0*H*(1.0+pvecback[pba->index_bg_p_scf]/pvecback[pba->index_bg_rho_scf])); theta_scf += k*k*alpha; } } // fprintf(ppw->perturb_output_file," "); /** - --> Handle (re-)allocation */ if (ppt->scalar_perturbations_data[ppw->index_ikout] == NULL){ class_alloc(ppt->scalar_perturbations_data[ppw->index_ikout], sizeof(double)*ppt->number_of_scalar_titles, error_message); ppt->size_scalar_perturbation_data[ppw->index_ikout] = 0; } else{ ppt->scalar_perturbations_data[ppw->index_ikout] = realloc(ppt->scalar_perturbations_data[ppw->index_ikout], sizeof(double)*(ppt->size_scalar_perturbation_data[ppw->index_ikout]+ppt->number_of_scalar_titles)); } storeidx = 0; dataptr = ppt->scalar_perturbations_data[ppw->index_ikout]+ ppt->size_scalar_perturbation_data[ppw->index_ikout]; ppt->size_scalar_perturbation_data[ppw->index_ikout] += ppt->number_of_scalar_titles; class_store_double(dataptr, tau, _TRUE_, storeidx); class_store_double(dataptr, pvecback[pba->index_bg_a], _TRUE_, storeidx); class_store_double(dataptr, delta_g, _TRUE_, 
storeidx); class_store_double(dataptr, theta_g, _TRUE_, storeidx); class_store_double(dataptr, shear_g, _TRUE_, storeidx); class_store_double(dataptr, pol0_g, _TRUE_, storeidx); class_store_double(dataptr, pol1_g, _TRUE_, storeidx); class_store_double(dataptr, pol2_g, _TRUE_, storeidx); class_store_double(dataptr, delta_b, _TRUE_, storeidx); class_store_double(dataptr, theta_b, _TRUE_, storeidx); class_store_double(dataptr, psi, _TRUE_, storeidx); class_store_double(dataptr, phi, _TRUE_, storeidx); /* perturbed recombination */ class_store_double(dataptr, delta_temp, ppt->has_perturbed_recombination, storeidx); class_store_double(dataptr, delta_chi, ppt->has_perturbed_recombination, storeidx); /* Ultra relativistic species */ class_store_double(dataptr, delta_ur, pba->has_ur, storeidx); class_store_double(dataptr, theta_ur, pba->has_ur, storeidx); class_store_double(dataptr, shear_ur, pba->has_ur, storeidx); /* Cold dark matter */ class_store_double(dataptr, delta_cdm, pba->has_cdm, storeidx); class_store_double(dataptr, theta_cdm, pba->has_cdm, storeidx); /* Non-cold Dark Matter */ if ((pba->has_ncdm == _TRUE_) && ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_))) { for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ class_store_double(dataptr, delta_ncdm[n_ncdm], _TRUE_, storeidx); class_store_double(dataptr, theta_ncdm[n_ncdm], _TRUE_, storeidx); class_store_double(dataptr, shear_ncdm[n_ncdm], _TRUE_, storeidx); class_store_double(dataptr, delta_p_over_delta_rho_ncdm[n_ncdm], _TRUE_, storeidx); } } /* Decaying cold dark matter */ class_store_double(dataptr, delta_dcdm, pba->has_dcdm, storeidx); class_store_double(dataptr, theta_dcdm, pba->has_dcdm, storeidx); /* Decay radiation */ class_store_double(dataptr, delta_dr, pba->has_dr, storeidx); class_store_double(dataptr, theta_dr, pba->has_dr, storeidx); class_store_double(dataptr, shear_dr, pba->has_dr, storeidx); /* Scalar field scf*/ 
class_store_double(dataptr, delta_scf, pba->has_scf, storeidx); class_store_double(dataptr, theta_scf, pba->has_scf, storeidx); //fprintf(ppw->perturb_output_file,"\n"); } /** - for tensor modes: */ if (_tensors_) { if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) { if (ppw->approx[ppw->index_ap_tca]==(int)tca_off) { delta_g = y[ppw->pv->index_pt_delta_g]; shear_g = y[ppw->pv->index_pt_shear_g]; l4_g = y[ppw->pv->index_pt_delta_g+4]; pol0_g = y[ppw->pv->index_pt_pol0_g]; pol2_g = y[ppw->pv->index_pt_pol2_g]; pol4_g = y[ppw->pv->index_pt_pol0_g+4]; } else { delta_g = -4./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; //TBC shear_g = 0.; l4_g = 0.; pol0_g = 1./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; //TBC pol2_g = 0.; pol4_g = 0.; } } else { delta_g = 0.; shear_g = 0.; l4_g = 0.; pol0_g = 0.; pol2_g = 0.; pol4_g = 0.; } if (ppt->evolve_tensor_ur == _TRUE_){ delta_ur = y[ppw->pv->index_pt_delta_ur]; shear_ur = y[ppw->pv->index_pt_shear_ur]; l4_ur = y[ppw->pv->index_pt_delta_ur+4]; } /** - --> Handle (re-)allocation */ if (ppt->tensor_perturbations_data[ppw->index_ikout] == NULL){ class_alloc(ppt->tensor_perturbations_data[ppw->index_ikout], sizeof(double)*ppt->number_of_tensor_titles, error_message); ppt->size_tensor_perturbation_data[ppw->index_ikout] = 0; } else{ ppt->tensor_perturbations_data[ppw->index_ikout] = realloc(ppt->tensor_perturbations_data[ppw->index_ikout], sizeof(double)*(ppt->size_tensor_perturbation_data[ppw->index_ikout]+ppt->number_of_tensor_titles)); } storeidx = 0; dataptr = ppt->tensor_perturbations_data[ppw->index_ikout]+ ppt->size_tensor_perturbation_data[ppw->index_ikout]; ppt->size_tensor_perturbation_data[ppw->index_ikout] += ppt->number_of_tensor_titles; //fprintf(ppw->perturb_output_file," "); class_store_double(dataptr, tau, _TRUE_, storeidx); class_store_double(dataptr, pvecback[pba->index_bg_a], _TRUE_, storeidx); class_store_double(dataptr, delta_g, _TRUE_, 
storeidx); class_store_double(dataptr, shear_g, _TRUE_, storeidx); class_store_double(dataptr, l4_g, _TRUE_, storeidx); class_store_double(dataptr, pol0_g, _TRUE_, storeidx); class_store_double(dataptr, pol2_g, _TRUE_, storeidx); class_store_double(dataptr, pol4_g, _TRUE_, storeidx); class_store_double(dataptr, y[ppw->pv->index_pt_gw], _TRUE_, storeidx); class_store_double(dataptr, y[ppw->pv->index_pt_gwdot], _TRUE_, storeidx); class_store_double(dataptr, delta_ur, ppt->evolve_tensor_ur, storeidx); class_store_double(dataptr, shear_ur, ppt->evolve_tensor_ur, storeidx); class_store_double(dataptr, l4_ur, ppt->evolve_tensor_ur, storeidx); //printf("index_pt_delta+ur = %d\n",ppw->pv->index_pt_delta_ur); /* Non-cold Dark Matter */ if (ppt->evolve_tensor_ncdm == _TRUE_) { idx = ppw->pv->index_pt_psi0_ncdm1; for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ rho_delta_ncdm = 0.0; rho_plus_p_theta_ncdm = 0.0; rho_plus_p_shear_ncdm = 0.0; delta_p_ncdm = 0.0; factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4); for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) { q = pba->q_ncdm[n_ncdm][index_q]; q2 = q*q; epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2); rho_delta_ncdm += q2*epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx]; rho_plus_p_theta_ncdm += q2*q*pba->w_ncdm[n_ncdm][index_q]*y[idx+1]; rho_plus_p_shear_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx+2]; delta_p_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx]; //Jump to next momentum bin: idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1); } rho_delta_ncdm *= factor; rho_plus_p_theta_ncdm *= k*factor; rho_plus_p_shear_ncdm *= 2.0/3.0*factor; delta_p_ncdm *= factor/3.; delta_ncdm[n_ncdm] = rho_delta_ncdm/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; theta_ncdm[n_ncdm] = rho_plus_p_theta_ncdm/ (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]); shear_ncdm[n_ncdm] = rho_plus_p_shear_ncdm/ 
(ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]); class_store_double(dataptr, delta_ncdm[n_ncdm], _TRUE_, storeidx); class_store_double(dataptr, theta_ncdm[n_ncdm], _TRUE_, storeidx); class_store_double(dataptr, shear_ncdm[n_ncdm], _TRUE_, storeidx); } } // fprintf(ppw->perturb_output_file,"\n"); } if (pba->has_ncdm == _TRUE_){ free(delta_ncdm); free(theta_ncdm); free(shear_ncdm); free(delta_p_over_delta_rho_ncdm); } return _SUCCESS_; } /** * Compute derivative of all perturbations to be integrated * * For each mode (scalar/vector/tensor) and each wavenumber k, this * function computes the derivative of all values in the vector of * perturbed variables to be integrated. * * This is one of the few functions in the code which is passed to the generic_integrator() routine. * Since generic_integrator() should work with functions passed from various modules, the format of the arguments * is a bit special: * - fixed parameters and workspaces are passed through a generic pointer. * generic_integrator() doesn't know what the content of this pointer is. * - errors are not written as usual in pth->error_message, but in a generic * error_message passed in the list of arguments. * * @param tau Input: conformal time * @param y Input: vector of perturbations * @param dy Output: vector of its derivatives (already allocated) * @param parameters_and_workspace Input/Output: in input, fixed parameters (e.g. indices); in output, background and thermo quantities evaluated at tau. 
* @param error_message Output: error message */ int perturb_derivs(double tau, double * y, double * dy, void * parameters_and_workspace, ErrorMsg error_message ) { /** Summary: */ /** - define local variables */ /* multipole */ int l; /* scale factor and other background quantities */ double a,a2,a_prime_over_a,R; /* short-cut names for the fields of the input structure */ struct perturb_parameters_and_workspace * pppaw; double k,k2; int index_md; struct precision * ppr; struct background * pba; struct thermo * pth; struct perturbs * ppt; struct perturb_workspace * ppw; double * pvecback; double * pvecthermo; double * pvecmetric; double * s_l; struct perturb_vector * pv; /* short-cut notations for the perturbations */ double delta_g=0.,theta_g=0.,shear_g=0.; double delta_b,theta_b; double cb2,cs2,ca2; double metric_continuity=0.,metric_euler=0.,metric_shear=0.,metric_ufa_class=0.; /* perturbed recombination (just to simplify the notation) */ double H0=0.,Nnow=0.,n_H=0.,fHe=0.; double delta_temp=0.,delta_chi=0., chi=0.; double alpha_rec=0.,delta_alpha_rec=0.; double a_rad=0., Compton_CR =0.; double Tb_in_K=0.; /* Non-metric source terms for photons, i.e. 
\mathcal{P}^{(m)} from arXiv:1305.3261 */ double P0,P1,P2; /* for use with fluid (fld): */ double w,w_prime; /* for use with non-cold dark matter (ncdm): */ int index_q,n_ncdm,idx; double q,epsilon,dlnf0_dlnq,qk_div_epsilon; double rho_ncdm_bg,p_ncdm_bg,pseudo_p_ncdm,w_ncdm,ca2_ncdm,ceff2_ncdm=0.,cvis2_ncdm=0.; /* for use with curvature */ double cotKgen, sqrt_absK; double s2_squared, ssqrt3; /* for use with dcdm and dr */ double f_dr, fprime_dr; /** - rename the fields of the input structure (just to avoid heavy notations) */ pppaw = parameters_and_workspace; k = pppaw->k; k2=k*k; index_md = pppaw->index_md; ppr = pppaw->ppr; pba = pppaw->pba; pth = pppaw->pth; ppt = pppaw->ppt; ppw = pppaw->ppw; s_l = ppw->s_l; pvecback = ppw->pvecback; pvecthermo = ppw->pvecthermo; pvecmetric = ppw->pvecmetric; pv = ppw->pv; /** - get background/thermo quantities in this point */ class_call(background_at_tau(pba, tau, pba->normal_info, pba->inter_closeby, &(ppw->last_index_back), pvecback), pba->error_message, error_message); class_call(thermodynamics_at_z(pba, pth, 1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */ pth->inter_closeby, &(ppw->last_index_thermo), pvecback, pvecthermo), pth->error_message, error_message); /** - get metric perturbations with perturb_einstein() */ class_call(perturb_einstein(ppr, pba, pth, ppt, index_md, k, tau, y, ppw), ppt->error_message, error_message); /** - compute related background quantities */ a = pvecback[pba->index_bg_a]; a2 = a*a; a_prime_over_a = pvecback[pba->index_bg_H] * a; R = 4./3. * pvecback[pba->index_bg_rho_g]/pvecback[pba->index_bg_rho_b]; /** - Compute 'generalised cotK function of argument \f$ \sqrt{|K|}*\tau \f$, for closing hierarchy. 
(see equation 2.34 in arXiv:1305.3261): */ if (pba->has_curvature == _FALSE_){ cotKgen = 1.0/(k*tau); } else{ sqrt_absK = sqrt(fabs(pba->K)); if (pba->K < 0) cotKgen = sqrt_absK/k/tanh(sqrt_absK*tau); else cotKgen = sqrt_absK/k/tan(sqrt_absK*tau); } s2_squared = 1.-3.*pba->K/k2; /** - for scalar modes: */ if (_scalars_) { /** - --> (a) define short-cut notations for the scalar perturbations */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { delta_g = y[pv->index_pt_delta_g]; theta_g = y[pv->index_pt_theta_g]; } delta_b = y[pv->index_pt_delta_b]; theta_b = y[pv->index_pt_theta_b]; cb2 = pvecthermo[pth->index_th_cb2]; /** - --> (b) perturbed recombination **/ if ((ppt->has_perturbed_recombination == _TRUE_)&&(ppw->approx[ppw->index_ap_tca]==(int)tca_off)){ delta_temp= y[ppw->pv->index_pt_perturbed_recombination_delta_temp]; delta_chi= y[ppw->pv->index_pt_perturbed_recombination_delta_chi]; chi=pvecthermo[pth->index_th_xe]; // Conversion of H0 in inverse seconds (pba->H0 is [H0/c] in inverse Mpcs) H0 = pba->H0 * _c_ / _Mpc_over_m_; //Computation of Nnow in SI units Nnow = 3.*H0*H0*pba->Omega0_b*(1.-pth->YHe)/(8.*_PI_*_G_*_m_H_); // total amount of hydrogen today n_H = (pba->a_today/a)*(pba->a_today/a)*(pba->a_today/a)* Nnow; // Helium-to-hydrogen ratio fHe = pth->YHe / (_not4_*(1-pth->YHe)); // The constant such that rho_gamma = a_rad * T^4 a_rad = 8./15.*pow(_PI_,5)*pow(_k_B_,4)/pow(_c_*_h_P_,3); // Compton cooling rate in Mpc^(-1) Compton_CR = 8./3. *_sigma_ * a_rad /(_m_e_ * _c_ *_c_) *_Mpc_over_m_ ; // Temperature is already in Kelvin Tb_in_K = pvecthermo[pth->index_th_Tb]; // Alpha in m^3/s, cf. 
Recfast paper alpha_rec = 1.14 * 4.309e-19*pow((Tb_in_K * 1e-4),-0.6166)/(1+0.6703*pow((Tb_in_K * 1e-4),0.53)) ; // delta alpha, dimensionless delta_alpha_rec= (-0.6166 + 0.6703 * pow((Tb_in_K * 1e-4),0.53)*(-0.6166-0.53))/(1+0.6703*pow((Tb_in_K * 1e-4),0.53)) * delta_temp; } // end of perturbed recombination related quantities /** - --> (c) compute metric-related quantities (depending on gauge; additional gauges can be coded below) - Each continuity equation contains a term in (theta+metric_continuity) with metric_continuity = (h_prime/2) in synchronous gauge, (-3 phi_prime) in newtonian gauge - Each Euler equation contains a source term metric_euler with metric_euler = 0 in synchronous gauge, (k2 psi) in newtonian gauge - Each shear derivative equation contains a source term metric_shear equal to metric_shear = (h_prime+6eta_prime)/2 in synchronous gauge, 0 in newtonian gauge - metric_shear_prime is the derivative of metric_shear - In the ufa_class approximation, the leading-order source term is (h_prime/2) in synchronous gauge, (-3 (phi_prime+psi_prime)) in newtonian gauge: we approximate the later by (-6 phi_prime) */ if (ppt->gauge == synchronous) { metric_continuity = pvecmetric[ppw->index_mt_h_prime]/2.; metric_euler = 0.; metric_shear = k2 * pvecmetric[ppw->index_mt_alpha]; //metric_shear_prime = k2 * pvecmetric[ppw->index_mt_alpha_prime]; metric_ufa_class = pvecmetric[ppw->index_mt_h_prime]/2.; } if (ppt->gauge == newtonian) { metric_continuity = -3.*pvecmetric[ppw->index_mt_phi_prime]; metric_euler = k2*pvecmetric[ppw->index_mt_psi]; metric_shear = 0.; //metric_shear_prime = 0.; metric_ufa_class = -6.*pvecmetric[ppw->index_mt_phi_prime]; } /** - --> (d) if some approximation schemes are turned on, enforce a few y[] values computed in perturb_einstein */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) { delta_g = ppw->rsa_delta_g; theta_g = ppw->rsa_theta_g; } /** - --> (e) BEGINNING OF ACTUAL SYSTEM OF EQUATIONS OF EVOLUTION */ /* Note concerning 
perturbed recombination: $cb2*delta_b$ must be replaced everywhere by $cb2*(delta_b+delta_temp)$. If perturbed recombination is not required, delta_temp is equal to zero. */ /** - ---> photon temperature density */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { dy[pv->index_pt_delta_g] = -4./3.*(theta_g+metric_continuity); } /** - ---> baryon density */ dy[pv->index_pt_delta_b] = -(theta_b+metric_continuity); /** - ---> baryon velocity (depends on tight-coupling approximation=tca) */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* without tca */ /** - ----> perturbed recombination has an impact **/ dy[pv->index_pt_theta_b] = - a_prime_over_a*theta_b + metric_euler + k2*cb2*(delta_b+delta_temp) + R*pvecthermo[pth->index_th_dkappa]*(theta_g-theta_b); } else { /* with tca */ class_call(perturb_tca_slip_and_shear(y,pppaw,error_message), error_message, error_message); /* perturbed recombination has an impact **/ dy[pv->index_pt_theta_b] = (-a_prime_over_a*theta_b +k2*(cb2*(delta_b+delta_temp)+R*(delta_g/4.-s2_squared*ppw->tca_shear_g)) +R*ppw->tca_slip)/(1.+R) +metric_euler; } /** - ---> photon temperature higher momenta and photon polarization (depend on tight-coupling approximation) */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /** - ----> if photon tight-coupling is off */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /** - -----> define \f$ \Pi = G_{\gamma 0} + G_{\gamma 2} + F_{\gamma 2} \f$ */ P0 = (y[pv->index_pt_pol0_g] + y[pv->index_pt_pol2_g] + 2.*s_l[2]*y[pv->index_pt_shear_g])/8.; /** - -----> photon temperature velocity */ dy[pv->index_pt_theta_g] = k2*(delta_g/4.-s2_squared*y[pv->index_pt_shear_g]) + metric_euler + pvecthermo[pth->index_th_dkappa]*(theta_b-theta_g); /** - -----> photon temperature shear */ dy[pv->index_pt_shear_g] = 0.5*(8./15.*(theta_g+metric_shear) -3./5.*k*s_l[3]/s_l[2]*y[pv->index_pt_l3_g] -pvecthermo[pth->index_th_dkappa]*(2.*y[pv->index_pt_shear_g]-4./5./s_l[2]*P0)); /** - -----> photon temperature 
l=3 */ l = 3; dy[pv->index_pt_l3_g] = k/(2.0*l+1.0)* (l*s_l[l]*2.*s_l[2]*y[pv->index_pt_shear_g]-(l+1.)*s_l[l+1]*y[pv->index_pt_l3_g+1]) - pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_l3_g]; /** - -----> photon temperature l>3 */ for (l = 4; l < pv->l_max_g; l++) { dy[pv->index_pt_delta_g+l] = k/(2.0*l+1.0)* (l*s_l[l]*y[pv->index_pt_delta_g+l-1]-(l+1)*s_l[l+1]*y[pv->index_pt_delta_g+l+1]) - pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l]; } /** - -----> photon temperature lmax */ l = pv->l_max_g; /* l=lmax */ dy[pv->index_pt_delta_g+l] = k*(s_l[l]*y[pv->index_pt_delta_g+l-1]-(1.+l)*cotKgen*y[pv->index_pt_delta_g+l]) - pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l]; /** - -----> photon polarization l=0 */ dy[pv->index_pt_pol0_g] = -k*y[pv->index_pt_pol0_g+1] -pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol0_g]-4.*P0); /** - -----> photon polarization l=1 */ dy[pv->index_pt_pol1_g] = k/3.*(y[pv->index_pt_pol1_g-1]-2.*s_l[2]*y[pv->index_pt_pol1_g+1]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol1_g]; /** - -----> photon polarization l=2 */ dy[pv->index_pt_pol2_g] = k/5.*(2.*s_l[2]*y[pv->index_pt_pol2_g-1]-3.*s_l[3]*y[pv->index_pt_pol2_g+1]) -pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol2_g]-4./5.*P0); /** - -----> photon polarization l>2 */ for (l=3; l < pv->l_max_pol_g; l++) dy[pv->index_pt_pol0_g+l] = k/(2.*l+1)* (l*s_l[l]*y[pv->index_pt_pol0_g+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_pol0_g+l+1]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l]; /** - -----> photon polarization lmax_pol */ l = pv->l_max_pol_g; dy[pv->index_pt_pol0_g+l] = k*(s_l[l]*y[pv->index_pt_pol0_g+l-1]-(l+1)*cotKgen*y[pv->index_pt_pol0_g+l]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l]; } /** - ----> if photon tight-coupling is on: */ else { /** - -----> in that case, only need photon velocity */ /* perturbed recombination has an impact **/ dy[pv->index_pt_theta_g] = 
-(dy[pv->index_pt_theta_b]+a_prime_over_a*theta_b-cb2*k2*(delta_b+delta_temp))/R +k2*(0.25*delta_g-s2_squared*ppw->tca_shear_g)+(1.+R)/R*metric_euler; } } /** - ---> cdm */ if (pba->has_cdm == _TRUE_) { /** - ----> newtonian gauge: cdm density and velocity */ if (ppt->gauge == newtonian) { dy[pv->index_pt_delta_cdm] = -(y[pv->index_pt_theta_cdm]+metric_continuity); /* cdm density */ dy[pv->index_pt_theta_cdm] = - a_prime_over_a*y[pv->index_pt_theta_cdm] + metric_euler; /* cdm velocity */ } /** - ----> synchronous gauge: cdm density only (velocity set to zero by definition of the gauge) */ if (ppt->gauge == synchronous) { dy[pv->index_pt_delta_cdm] = -metric_continuity; /* cdm density */ } } /* perturbed recombination */ /* computes the derivatives of delta x_e and delta T_b */ if((ppt->has_perturbed_recombination == _TRUE_)&&(ppw->approx[ppw->index_ap_tca] == (int)tca_off)){ // alpha * n_H is in inverse seconds, so we have to multiply it by Mpc_in_sec dy[ppw->pv->index_pt_perturbed_recombination_delta_chi] = - alpha_rec* a * chi*n_H *(delta_alpha_rec + delta_chi + delta_b) * _Mpc_over_m_ / _c_ ; // see the documentation for this formula dy[ppw->pv->index_pt_perturbed_recombination_delta_temp] = 2./3. * dy[ppw->pv->index_pt_delta_b] - a * Compton_CR * pow(pba->T_cmb/a, 4) * chi / (1.+chi+fHe) * ( (1.-pba->T_cmb*pba->a_today/a/pvecthermo[pth->index_th_Tb])*(delta_g + delta_chi*(1.+fHe)/(1.+chi+fHe)) + pba->T_cmb*pba->a_today/a/pvecthermo[pth->index_th_Tb] *(delta_temp - 1./4. * delta_g) ); } /** - ---> dcdm and dr */ if (pba->has_dcdm == _TRUE_) { /** - ----> dcdm */ dy[pv->index_pt_delta_dcdm] = -(y[pv->index_pt_theta_dcdm]+metric_continuity) - a * pba->Gamma_dcdm / k2 * metric_euler; /* dcdm density */ dy[pv->index_pt_theta_dcdm] = - a_prime_over_a*y[pv->index_pt_theta_dcdm] + metric_euler; /* dcdm velocity */ } /** - ---> dr */ if ((pba->has_dcdm == _TRUE_)&&(pba->has_dr == _TRUE_)) { /* f = rho_dr*a^4/rho_crit_today. In CLASS density units rho_crit_today = H0^2. 
*/ f_dr = pow(pow(a/pba->a_today,2)/pba->H0,2)*pvecback[pba->index_bg_rho_dr]; fprime_dr = pba->Gamma_dcdm*pvecback[pba->index_bg_rho_dcdm]*pow(a,5)/pow(pba->H0,2); /** - ----> dr F0 */ dy[pv->index_pt_F0_dr] = -k*y[pv->index_pt_F0_dr+1]-4./3.*metric_continuity*f_dr+ fprime_dr*(y[pv->index_pt_delta_dcdm]+metric_euler/k2); /** - ----> dr F1 */ dy[pv->index_pt_F0_dr+1] = k/3.*y[pv->index_pt_F0_dr]-2./3.*k*y[pv->index_pt_F0_dr+2]*s2_squared + 4*metric_euler/(3.*k)*f_dr + fprime_dr/k*y[pv->index_pt_theta_dcdm]; /** - ----> exact dr F2 */ dy[pv->index_pt_F0_dr+2] = 8./15.*(3./4.*k*y[pv->index_pt_F0_dr+1]+metric_shear*f_dr) -3./5.*k*s_l[3]/s_l[2]*y[pv->index_pt_F0_dr+3]; /** - ----> exact dr l=3 */ l = 3; dy[pv->index_pt_F0_dr+3] = k/(2.*l+1.)* (l*s_l[l]*s_l[2]*y[pv->index_pt_F0_dr+2]-(l+1.)*s_l[l+1]*y[pv->index_pt_F0_dr+4]); /** - ----> exact dr l>3 */ for (l = 4; l < pv->l_max_dr; l++) { dy[pv->index_pt_F0_dr+l] = k/(2.*l+1)* (l*s_l[l]*y[pv->index_pt_F0_dr+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_F0_dr+l+1]); } /** - ----> exact dr lmax_dr */ l = pv->l_max_dr; dy[pv->index_pt_F0_dr+l] = k*(s_l[l]*y[pv->index_pt_F0_dr+l-1]-(1.+l)*cotKgen*y[pv->index_pt_F0_dr+l]); } /** - ---> fluid (fld) */ if (pba->has_fld == _TRUE_) { /** - ----> factors w, w_prime, adiabatic sound speed ca2 (all three background-related), plus actual sound speed in the fluid rest frame cs2 */ w = pba->w0_fld + pba->wa_fld * (1. - a / pba->a_today); w_prime = - pba->wa_fld * a / pba->a_today * a_prime_over_a; ca2 = w - w_prime / 3. 
/ (1.+w) / a_prime_over_a; cs2 = pba->cs2_fld; /** - ----> fluid density */ dy[pv->index_pt_delta_fld] = -(1+w)*(y[pv->index_pt_theta_fld]+metric_continuity) -3.*(cs2-w)*a_prime_over_a*y[pv->index_pt_delta_fld] -9.*(1+w)*(cs2-ca2)*a_prime_over_a*a_prime_over_a*y[pv->index_pt_theta_fld]/k2; /** - ----> fluid velocity */ dy[pv->index_pt_theta_fld] = /* fluid velocity */ -(1.-3.*cs2)*a_prime_over_a*y[pv->index_pt_theta_fld] +cs2*k2/(1.+w)*y[pv->index_pt_delta_fld] +metric_euler; } /** - ---> scalar field (scf) */ if (pba->has_scf == _TRUE_) { /** - ----> field value */ dy[pv->index_pt_phi_scf] = y[pv->index_pt_phi_prime_scf]; /** - ----> Klein Gordon equation */ dy[pv->index_pt_phi_prime_scf] = - 2.*a_prime_over_a*y[pv->index_pt_phi_prime_scf] - metric_continuity*pvecback[pba->index_bg_phi_prime_scf] // metric_continuity = h'/2 - (k2 + a2*pvecback[pba->index_bg_ddV_scf])*y[pv->index_pt_phi_scf]; //checked } /** - ---> ultra-relativistic neutrino/relics (ur) */ if (pba->has_ur == _TRUE_) { /** - ----> if radiation streaming approximation is off */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /** - -----> ur density */ dy[pv->index_pt_delta_ur] = // standard term -4./3.*(y[pv->index_pt_theta_ur] + metric_continuity) // non-standard term, non-zero if if ceff2_ur not 1/3 +(1.-ppt->three_ceff2_ur)*a_prime_over_a*(y[pv->index_pt_delta_ur] + 4.*a_prime_over_a*y[pv->index_pt_theta_ur]/k/k); /** - -----> ur velocity */ dy[pv->index_pt_theta_ur] = // standard term with extra coefficient (3 ceff2_ur), normally equal to one k2*(ppt->three_ceff2_ur*y[pv->index_pt_delta_ur]/4.-s2_squared*y[pv->index_pt_shear_ur]) + metric_euler // non-standard term, non-zero if ceff2_ur not 1/3 -(1.-ppt->three_ceff2_ur)*a_prime_over_a*y[pv->index_pt_theta_ur]; if(ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) { /** - -----> exact ur shear */ dy[pv->index_pt_shear_ur] = 0.5*( // standard term 8./15.*(y[pv->index_pt_theta_ur]+metric_shear)-3./5.*k*s_l[3]/s_l[2]*y[pv->index_pt_shear_ur+1] // 
non-standard term, non-zero if cvis2_ur not 1/3 -(1.-ppt->three_cvis2_ur)*(8./15.*(y[pv->index_pt_theta_ur]+metric_shear))); /** - -----> exact ur l=3 */ l = 3; dy[pv->index_pt_l3_ur] = k/(2.*l+1.)* (l*2.*s_l[l]*s_l[2]*y[pv->index_pt_shear_ur]-(l+1.)*s_l[l+1]*y[pv->index_pt_l3_ur+1]); /** - -----> exact ur l>3 */ for (l = 4; l < pv->l_max_ur; l++) { dy[pv->index_pt_delta_ur+l] = k/(2.*l+1)* (l*s_l[l]*y[pv->index_pt_delta_ur+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_ur+l+1]); } /** - -----> exact ur lmax_ur */ l = pv->l_max_ur; dy[pv->index_pt_delta_ur+l] = k*(s_l[l]*y[pv->index_pt_delta_ur+l-1]-(1.+l)*cotKgen*y[pv->index_pt_delta_ur+l]); } else { /** - -----> in fluid approximation (ufa): only ur shear needed */ //TBC: curvature? /* a la Ma & Bertschinger */ if (ppr->ur_fluid_approximation == ufa_mb) { dy[pv->index_pt_shear_ur] = -3./tau*y[pv->index_pt_shear_ur] +2./3.*(y[pv->index_pt_theta_ur]+metric_shear); } /* a la Hu */ if (ppr->ur_fluid_approximation == ufa_hu) { dy[pv->index_pt_shear_ur] = -3.*a_prime_over_a*y[pv->index_pt_shear_ur] +2./3.*(y[pv->index_pt_theta_ur]+metric_shear); } /* a la CLASS */ if (ppr->ur_fluid_approximation == ufa_CLASS) { dy[pv->index_pt_shear_ur] = -3./tau*y[pv->index_pt_shear_ur] +2./3.*(y[pv->index_pt_theta_ur]+metric_ufa_class); } } } } /** - ---> non-cold dark matter (ncdm): massive neutrinos, WDM, etc. 
*/ //TBC: curvature in all ncdm if (pba->has_ncdm == _TRUE_) { idx = pv->index_pt_psi0_ncdm1; /** - ----> first case: use a fluid approximation (ncdmfa) */ //TBC: curvature if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on) { /** - -----> loop over species */ for (n_ncdm=0; n_ncdm<pv->N_ncdm; n_ncdm++) { /** - -----> define intermediate quantitites */ rho_ncdm_bg = pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; /* background density */ p_ncdm_bg = pvecback[pba->index_bg_p_ncdm1+n_ncdm]; /* background pressure */ pseudo_p_ncdm = pvecback[pba->index_bg_pseudo_p_ncdm1+n_ncdm]; /* pseudo-pressure (see CLASS IV paper) */ w_ncdm = p_ncdm_bg/rho_ncdm_bg; /* equation of state parameter */ ca2_ncdm = w_ncdm/3.0/(1.0+w_ncdm)*(5.0-pseudo_p_ncdm/p_ncdm_bg); /* adiabatic sound speed */ /* c_eff is (delta p / delta rho) in the gauge under consideration (not in the gauge comoving with the fluid) */ /* c_vis is introduced in order to close the system */ /* different ansatz for sound speed c_eff and viscosity speed c_vis */ if (ppr->ncdm_fluid_approximation == ncdmfa_mb) { ceff2_ncdm = ca2_ncdm; cvis2_ncdm = 3.*w_ncdm*ca2_ncdm; } if (ppr->ncdm_fluid_approximation == ncdmfa_hu) { ceff2_ncdm = ca2_ncdm; cvis2_ncdm = w_ncdm; } if (ppr->ncdm_fluid_approximation == ncdmfa_CLASS) { ceff2_ncdm = ca2_ncdm; cvis2_ncdm = 3.*w_ncdm*ca2_ncdm; } /** - -----> exact continuity equation */ dy[idx] = -(1.0+w_ncdm)*(y[idx+1]+metric_continuity)- 3.0*a_prime_over_a*(ceff2_ncdm-w_ncdm)*y[idx]; /** - -----> exact euler equation */ dy[idx+1] = -a_prime_over_a*(1.0-3.0*ca2_ncdm)*y[idx+1]+ ceff2_ncdm/(1.0+w_ncdm)*k2*y[idx]-k2*y[idx+2] + metric_euler; /** - -----> different ansatz for approximate shear derivative */ if (ppr->ncdm_fluid_approximation == ncdmfa_mb) { dy[idx+2] = -3.0*(a_prime_over_a*(2./3.-ca2_ncdm-pseudo_p_ncdm/p_ncdm_bg/3.)+1./tau)*y[idx+2] +8.0/3.0*cvis2_ncdm/(1.0+w_ncdm)*s_l[2]*(y[idx+1]+metric_shear); } if (ppr->ncdm_fluid_approximation == ncdmfa_hu) { dy[idx+2] = 
-3.0*a_prime_over_a*ca2_ncdm/w_ncdm*y[idx+2] +8.0/3.0*cvis2_ncdm/(1.0+w_ncdm)*s_l[2]*(y[idx+1]+metric_shear); } if (ppr->ncdm_fluid_approximation == ncdmfa_CLASS) { dy[idx+2] = -3.0*(a_prime_over_a*(2./3.-ca2_ncdm-pseudo_p_ncdm/p_ncdm_bg/3.)+1./tau)*y[idx+2] +8.0/3.0*cvis2_ncdm/(1.0+w_ncdm)*s_l[2]*(y[idx+1]+metric_ufa_class); } /** - -----> jump to next species */ idx += pv->l_max_ncdm[n_ncdm]+1; } } /** - ----> second case: use exact equation (Boltzmann hierarchy on momentum grid) */ else { /** - -----> loop over species */ for (n_ncdm=0; n_ncdm<pv->N_ncdm; n_ncdm++) { /** - -----> loop over momentum */ for (index_q=0; index_q < pv->q_size_ncdm[n_ncdm]; index_q++) { /** - -----> define intermediate quantities */ dlnf0_dlnq = pba->dlnf0_dlnq_ncdm[n_ncdm][index_q]; q = pba->q_ncdm[n_ncdm][index_q]; epsilon = sqrt(q*q+a2*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]); qk_div_epsilon = k*q/epsilon; /** - -----> ncdm density for given momentum bin */ dy[idx] = -qk_div_epsilon*y[idx+1]+metric_continuity*dlnf0_dlnq/3.; /** - -----> ncdm velocity for given momentum bin */ dy[idx+1] = qk_div_epsilon/3.0*(y[idx] - 2*s_l[2]*y[idx+2]) -epsilon*metric_euler/(3*q*k)*dlnf0_dlnq; /** - -----> ncdm shear for given momentum bin */ dy[idx+2] = qk_div_epsilon/5.0*(2*s_l[2]*y[idx+1]-3.*s_l[3]*y[idx+3]) -s_l[2]*metric_shear*2./15.*dlnf0_dlnq; /** - -----> ncdm l>3 for given momentum bin */ for(l=3; l<pv->l_max_ncdm[n_ncdm]; l++){ dy[idx+l] = qk_div_epsilon/(2.*l+1.0)*(l*s_l[l]*y[idx+(l-1)]-(l+1.)*s_l[l+1]*y[idx+(l+1)]); } /** - -----> ncdm lmax for given momentum bin (truncation as in Ma and Bertschinger) but with curvature taken into account a la arXiv:1305.3261 */ dy[idx+l] = qk_div_epsilon*y[idx+l-1]-(1.+l)*k*cotKgen*y[idx+l]; /** - -----> jump to next momentum bin or species */ idx += (pv->l_max_ncdm[n_ncdm]+1); } } } } /** - ---> metric */ /** - ---> eta of synchronous gauge */ if (ppt->gauge == synchronous) { dy[pv->index_pt_eta] = pvecmetric[ppw->index_mt_eta_prime]; } if (ppt->gauge 
== newtonian) {
      dy[pv->index_pt_phi] = pvecmetric[ppw->index_mt_phi_prime];
    }
  }

  /** - vector mode */

  if (_vectors_) {

    /* NOTE(review): debug trace left in by the authors; consider removing in production runs */
    fprintf(stderr,"we are in vectors\n");

    /* curvature factor entering the photon velocity source (reduces to 1 for K=0) */
    ssqrt3 = sqrt(1.-2.*pba->K/k2);
    cb2 = pvecthermo[pth->index_th_cb2];

    /** - --> baryon velocity */

    if (ppt->gauge == synchronous) {

      dy[pv->index_pt_theta_b] =
        -(1-3.*cb2)*a_prime_over_a*y[pv->index_pt_theta_b]
        - pvecthermo[pth->index_th_dkappa]*(_SQRT2_/4.*delta_g + y[pv->index_pt_theta_b]);
    }
    else if (ppt->gauge == newtonian) {

      dy[pv->index_pt_theta_b] =
        -(1-3.*cb2)*a_prime_over_a*y[pv->index_pt_theta_b]
        - _SQRT2_/4.*pvecthermo[pth->index_th_dkappa]*(delta_g+2.*_SQRT2_*y[pv->index_pt_theta_b])
        + pvecmetric[ppw->index_mt_V_prime]+(1.-3.*cb2)*a_prime_over_a*y[pv->index_pt_V];
    }

    /* NOTE(review): approximation-scheme guards intentionally disabled here (kept for reference):
       if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { if (ppw->approx[ppw->index_ap_tca]==(int)tca_off) { */

    /* short-cut notations for the tensor perturbations */
    delta_g = y[pv->index_pt_delta_g];
    theta_g = y[pv->index_pt_theta_g];
    shear_g = y[pv->index_pt_shear_g];

    /* (P^{(1)}) (see Eq. B.23 in 1305.3261)*/
    P1 = -_SQRT6_/40.*( 4./(3.*k)*theta_g //F1
                        +y[pv->index_pt_delta_g+3]
                        +2.*y[pv->index_pt_pol0_g]
                        +10./7.*y[pv->index_pt_pol2_g]
                        -4./7.*y[pv->index_pt_pol0_g+4]);

    if (ppt->gauge == synchronous) {

      /* photon density (delta_g = F_0) */
      dy[pv->index_pt_delta_g] = -4./3.*theta_g
        -pvecthermo[pth->index_th_dkappa]*(delta_g+2.*_SQRT2_*y[pv->index_pt_theta_b]);

      /* photon velocity (theta_g = (3k/4)*F_1) */
      dy[pv->index_pt_theta_g] = k2*(delta_g/4.-s_l[2]*shear_g)
        -pvecthermo[pth->index_th_dkappa]*(theta_g+4.0/_SQRT6_*P1)
        +4.0/(3.0*_SQRT2_)*ssqrt3*y[pv->index_pt_hv_prime];
    }
    else if (ppt->gauge == newtonian) {

      /* photon density (delta_g = F_0) */
      dy[pv->index_pt_delta_g] = -4./3.*theta_g
        -pvecthermo[pth->index_th_dkappa]*(delta_g+2.*_SQRT2_*y[pv->index_pt_theta_b])
        -2.*_SQRT2_*pvecmetric[ppw->index_mt_V_prime];

      /* photon velocity (theta_g = (3k/4)*F_1) */
      dy[pv->index_pt_theta_g] = k2*(delta_g/4.-s_l[2]*shear_g)
        -pvecthermo[pth->index_th_dkappa]*(theta_g+4.0/_SQRT6_*P1);
    }

    /* photon shear (shear_g = F_2/2) */
    dy[pv->index_pt_shear_g] = 4./15.*s_l[2]*theta_g-3./10.*k*s_l[3]*y[pv->index_pt_shear_g+1]
      -pvecthermo[pth->index_th_dkappa]*shear_g;

    /* photon l=3 */
    dy[pv->index_pt_l3_g] = k/7.*(6.*s_l[3]*shear_g-4.*s_l[4]*y[pv->index_pt_l3_g+1])
      -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_l3_g];

    /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */
    for (l=4; l < pv->l_max_g; l++)
      dy[pv->index_pt_delta_g+l] = k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_delta_g+l-1]
                                                -(l+1.)*s_l[l+1]*y[pv->index_pt_delta_g+l+1])
        -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];

    /* l=lmax: hierarchy truncation with curvature-aware closure */
    l = pv->l_max_g;
    dy[pv->index_pt_delta_g+l] = k*(s_l[l]*y[pv->index_pt_delta_g+l-1]
                                    -(1.+l)*cotKgen*y[pv->index_pt_delta_g+l])
      - pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];

    /* photon polarization, l=0 (pol0_g = G_0)*/
    dy[pv->index_pt_pol0_g] = -k*y[pv->index_pt_pol0_g+1]
      -pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol0_g]-_SQRT6_*P1);

    /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */
    for (l=1; l < pv->l_max_pol_g; l++)
      dy[pv->index_pt_pol0_g+l] = k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_pol0_g+l-1]
                                               -(l+1.)*s_l[l+1]*y[pv->index_pt_pol0_g+l+1])
        -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];

    /* l=lmax */
    l = pv->l_max_pol_g;
    dy[pv->index_pt_pol0_g+l] = k*(s_l[l]*y[pv->index_pt_pol0_g+l-1]
                                   -(l+1.)*cotKgen*y[pv->index_pt_pol0_g+l])
      -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];

    /* closing braces of the disabled approximation guards above:
       } } */

    if (ppt->gauge == synchronous) {
      /* Vector metric perturbation in synchronous gauge: */
      dy[pv->index_pt_hv_prime] = pvecmetric[ppw->index_mt_hv_prime_prime];
    }
    else if (ppt->gauge == newtonian){
      /* Vector metric perturbation in Newtonian gauge: */
      dy[pv->index_pt_V] = pvecmetric[ppw->index_mt_V_prime];
    }

  }

  /** - tensor modes: */

  if (_tensors_) {

    if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
      if (ppw->approx[ppw->index_ap_tca]==(int)tca_off) {

        /* short-cut notations for the tensor perturbations */
        delta_g = y[pv->index_pt_delta_g];
        theta_g = y[pv->index_pt_theta_g];
        shear_g = y[pv->index_pt_shear_g];

        /* (P^{(2)}) */
        P2 =-1.0/_SQRT6_*( 1./10.*delta_g
                           +2./7.*shear_g
                           +3./70.*y[pv->index_pt_delta_g+4]
                           -3./5.*y[pv->index_pt_pol0_g]
                           +6./7.*y[pv->index_pt_pol2_g]
                           -3./70.*y[pv->index_pt_pol0_g+4]);

        /* above expression from paper, expression below matches old class but is not correct
           P2 = -1.0/_SQRT6_*( 1./10.*delta_g +2./35.*shear_g +1./210.*y[pv->index_pt_delta_g+4] -3./5.*y[pv->index_pt_pol0_g] +6./35.*y[pv->index_pt_pol2_g] -1./210.*y[pv->index_pt_pol0_g+4] );
        */

        /* photon density (delta_g = F_0) */
        dy[pv->index_pt_delta_g] = -4./3.*theta_g
          -pvecthermo[pth->index_th_dkappa]*(delta_g+_SQRT6_*P2)
          //+y[pv->index_pt_gwdot];
          +_SQRT6_*y[pv->index_pt_gwdot]; //TBC

        /* photon velocity (theta_g = (3k/4)*F_1) */
        dy[pv->index_pt_theta_g] = k2*(delta_g/4.-s_l[2]*shear_g)
          -pvecthermo[pth->index_th_dkappa]*theta_g;

        /* photon shear (shear_g = F_2/2) */
        dy[pv->index_pt_shear_g] = 4./15.*s_l[2]*theta_g-3./10.*k*s_l[3]*y[pv->index_pt_shear_g+1]
          -pvecthermo[pth->index_th_dkappa]*shear_g;

        /* photon l=3 */
        dy[pv->index_pt_l3_g] = k/7.*(6.*s_l[3]*shear_g-4.*s_l[4]*y[pv->index_pt_l3_g+1])
          -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_l3_g];

        /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */
        for (l=4; l < pv->l_max_g; l++)
          dy[pv->index_pt_delta_g+l] = k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_delta_g+l-1]
                                                    -(l+1.)*s_l[l+1]*y[pv->index_pt_delta_g+l+1])
            -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];

        /* l=lmax */
        l = pv->l_max_g;
        dy[pv->index_pt_delta_g+l] = k*(s_l[l]*y[pv->index_pt_delta_g+l-1]
                                        -(1.+l)*cotKgen*y[pv->index_pt_delta_g+l])
          - pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];

        /* photon polarization, l=0 (pol0_g = G_0)*/
        dy[pv->index_pt_pol0_g] = -k*y[pv->index_pt_pol0_g+1]
          -pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol0_g]-_SQRT6_*P2);

        /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */
        for (l=1; l < pv->l_max_pol_g; l++)
          dy[pv->index_pt_pol0_g+l] = k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_pol0_g+l-1]
                                                   -(l+1.)*s_l[l+1]*y[pv->index_pt_pol0_g+l+1])
            -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];

        /* l=lmax */
        l = pv->l_max_pol_g;
        dy[pv->index_pt_pol0_g+l] = k*(s_l[l]*y[pv->index_pt_pol0_g+l-1]
                                       -(l+1.)*cotKgen*y[pv->index_pt_pol0_g+l])
          -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];

      }
    }

    /* tensor perturbations of massless neutrinos / ultra-relativistic relics */
    if (ppt->evolve_tensor_ur == _TRUE_) {

      dy[pv->index_pt_delta_ur] = -4./3.*y[pv->index_pt_theta_ur]+_SQRT6_*y[pv->index_pt_gwdot];

      dy[pv->index_pt_theta_ur] = k2*(y[pv->index_pt_delta_ur]/4.-s2_squared*y[pv->index_pt_shear_ur]);

      dy[pv->index_pt_shear_ur] = (4./15.*y[pv->index_pt_theta_ur]
                                   -3./10.*k*s_l[3]/s_l[2]*y[pv->index_pt_shear_ur+1]);

      l = 3;
      dy[pv->index_pt_l3_ur] = k/(2.*l+1.)*
        (l*2.*s_l[l]*s_l[2]*y[pv->index_pt_shear_ur]-(l+1.)*s_l[l+1]*y[pv->index_pt_l3_ur+1]);

      for (l = 4; l < pv->l_max_ur; l++) {
        dy[pv->index_pt_delta_ur+l] = k/(2.*l+1)*
          (l*s_l[l]*y[pv->index_pt_delta_ur+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_ur+l+1]);
      }

      /* l=lmax truncation */
      l = pv->l_max_ur;
      dy[pv->index_pt_delta_ur+l] = k*(s_l[l]*y[pv->index_pt_delta_ur+l-1]-(1.+l)*cotKgen*y[pv->index_pt_delta_ur+l]);
    }

    /** - --> non-cold dark matter (ncdm): massive neutrinos, WDM, etc. */
    //TBC: curvature in all ncdm
    if (ppt->evolve_tensor_ncdm == _TRUE_) {

      idx = pv->index_pt_psi0_ncdm1;

      /** - ---> loop over species */
      for (n_ncdm=0; n_ncdm<pv->N_ncdm; n_ncdm++) {

        /** - ----> loop over momentum */
        for (index_q=0; index_q < pv->q_size_ncdm[n_ncdm]; index_q++) {

          /** - ----> define intermediate quantities */
          dlnf0_dlnq = pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
          q = pba->q_ncdm[n_ncdm][index_q];
          epsilon = sqrt(q*q+a2*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]);
          qk_div_epsilon = k*q/epsilon;

          /** - ----> ncdm density for given momentum bin */
          dy[idx] = -qk_div_epsilon*y[idx+1]-0.25*_SQRT6_*y[pv->index_pt_gwdot]*dlnf0_dlnq;

          /** - ----> ncdm l>0 for given momentum bin */
          for(l=1; l<pv->l_max_ncdm[n_ncdm]; l++){
            dy[idx+l] = qk_div_epsilon/(2.*l+1.0)*(l*s_l[l]*y[idx+(l-1)]-(l+1.)*s_l[l+1]*y[idx+(l+1)]);
          }

          /** - ----> ncdm lmax for given momentum bin (truncation as in Ma and Bertschinger)
              but with curvature taken into account a la arXiv:1305.3261 */
          dy[idx+l] = qk_div_epsilon*y[idx+l-1]-(1.+l)*k*cotKgen*y[idx+l];

          /** - ----> jump to next momentum bin or species */
          idx += (pv->l_max_ncdm[n_ncdm]+1);
        }
      }
    }

    /** - --> tensor metric perturbation h (gravitational waves) */
    dy[pv->index_pt_gw] = y[pv->index_pt_gwdot];

    /** - --> its time-derivative */
    dy[pv->index_pt_gwdot] = pvecmetric[ppw->index_mt_gw_prime_prime];

  }

  return _SUCCESS_;
}

/**
 * Compute the tight-coupling (TCA) baryon-photon slip (theta_b' - theta_g')
 * and photon shear, at the order requested by
 * ppr->tight_coupling_approximation, and store them in ppw->tca_slip and
 * ppw->tca_shear_g.
 *
 * @param y                        Input: vector of perturbations
 * @param parameters_and_workspace Input/Output: fixed parameters (e.g. indices), workspace, approximation schemes
 * @param error_message            Output: error message
 * @return the error status
 */

int perturb_tca_slip_and_shear(double * y,
                               void * parameters_and_workspace,
                               ErrorMsg error_message
                               ) {
  /** Summary: */

  /** - define local variables */

  /* scale factor and other background quantities */
  double a,a_prime_over_a,a_primeprime_over_a,R;

  /* useful terms for tight-coupling approximation */
  double slip=0.;
  double tau_c=0.,dtau_c=0.;
  double theta_prime,shear_g_prime=0.,theta_prime_prime;
  double g0,g0_prime,g0_prime_prime;
  double F=0.,F_prime=0.,F_prime_prime=0.;

  /* short-cut names for the fields of the input structure */
  struct perturb_parameters_and_workspace * pppaw;
  double k,k2;
  struct precision * ppr;
  struct background * pba;
  struct thermo * pth;
  struct perturbs * ppt;
  struct perturb_workspace * ppw;
  double * pvecback;
  double * pvecthermo;
  double * pvecmetric;
  struct perturb_vector * pv;

  /* short-cut notations for the perturbations */
  double delta_g=0.,theta_g=0.,shear_g=0.;
  double delta_b,theta_b;
  double Delta;
  double cb2;
  double metric_continuity=0.,metric_euler=0.,metric_shear=0.,metric_shear_prime=0.;

  /* perturbed recombination */
  double delta_temp=0.;

  /* for use with curvature */
  double s2_squared;

  /** - rename the fields of the input structure (just to avoid heavy notations) */

  pppaw = parameters_and_workspace;
  k = pppaw->k;
  k2=k*k;
  ppr = pppaw->ppr;
  pba = pppaw->pba;
  pth = pppaw->pth;
  ppt = pppaw->ppt;
  ppw = pppaw->ppw;
  pvecback = ppw->pvecback;
  pvecthermo = ppw->pvecthermo;
  pvecmetric = ppw->pvecmetric;
  pv = ppw->pv;

  /** - compute related background quantities */

  a = pvecback[pba->index_bg_a];
  a_prime_over_a = pvecback[pba->index_bg_H] * a;
  a_primeprime_over_a = pvecback[pba->index_bg_H_prime] * a + 2. * a_prime_over_a * a_prime_over_a;
  //z = pba->a_today-1.;
  /* R = 4/3 * (photon energy density)/(baryon energy density) */
  R = 4./3. * pvecback[pba->index_bg_rho_g]/pvecback[pba->index_bg_rho_b];
  s2_squared = 1.-3.*pba->K/k2;

  /** - --> (a) define short-cut notations for the scalar perturbations */
  if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
    delta_g = y[pv->index_pt_delta_g];
    theta_g = y[pv->index_pt_theta_g];
  }
  delta_b = y[pv->index_pt_delta_b];
  theta_b = y[pv->index_pt_theta_b];
  cb2 = pvecthermo[pth->index_th_cb2];

  /* perturbed recombination */
  if ((ppt->has_perturbed_recombination == _TRUE_) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off) ){
    delta_temp = y[pv->index_pt_perturbed_recombination_delta_temp];
  }

  /** - --> (b) define short-cut notations used only in tight-coupling approximation */
  tau_c = 1./pvecthermo[pth->index_th_dkappa]; /* inverse of opacity */
  dtau_c = -pvecthermo[pth->index_th_ddkappa]*tau_c*tau_c; /* its first derivative wrt conformal time */
  F = tau_c/(1+R); /* F = tau_c/(1+R) */

  if (ppr->tight_coupling_approximation >= (int)second_order_CLASS) {
    F_prime = dtau_c/(1+R)+tau_c*a_prime_over_a*R/(1+R)/(1+R); /*F' needed by second_order_CLASS and compromise_CLASS */
    if (ppr->tight_coupling_approximation == (int)second_order_CLASS) {
      F_prime_prime =(- pvecthermo[pth->index_th_dddkappa]*tau_c*tau_c /* F'' needed by second_order_CLASS only */
                      + 2.*pvecthermo[pth->index_th_ddkappa]*pvecthermo[pth->index_th_ddkappa]*tau_c*tau_c*tau_c)/(1+R)
        +2.*dtau_c*a_prime_over_a*R/(1+R)/(1+R)
        +tau_c*((a_primeprime_over_a-2.*a_prime_over_a*a_prime_over_a)+2.*a_prime_over_a*a_prime_over_a*R/(1+R))*R/(1+R)/(1+R);
    }
  }

  /** - --> (c) compute metric-related quantities (depending on gauge; additional gauges can be coded below)

      - Each continuity equation contains a term in (theta+metric_continuity) with
        metric_continuity = (h_prime/2) in synchronous gauge, (-3 phi_prime) in newtonian gauge

      - Each Euler equation contains a source term metric_euler with
        metric_euler = 0 in synchronous gauge, (k2 psi) in newtonian gauge

      - Each shear derivative equation contains a source term metric_shear equal to
        metric_shear = (h_prime+6eta_prime)/2 in synchronous gauge, 0 in newtonian gauge

      - metric_shear_prime is the derivative of metric_shear

      - In the ufa_class approximation, the leading-order source term is (h_prime/2) in
        synchronous gauge, (-3 (phi_prime+psi_prime)) in newtonian gauge: we approximate
        the later by (-6 phi_prime) */

  if (ppt->gauge == synchronous) {
    metric_continuity = pvecmetric[ppw->index_mt_h_prime]/2.;
    metric_euler = 0.;
    metric_shear = k2 * pvecmetric[ppw->index_mt_alpha];
    metric_shear_prime = k2 * pvecmetric[ppw->index_mt_alpha_prime];
  }
  if (ppt->gauge == newtonian) {
    metric_continuity = -3.*pvecmetric[ppw->index_mt_phi_prime];
    metric_euler = k2*pvecmetric[ppw->index_mt_psi];
    metric_shear = 0.;
    metric_shear_prime = 0.;
  }

  /** - --> (d) if some approximation schemes are turned on, enforce a few y[ ] values computed in perturb_einstein */

  /* free-streaming photon velocity */
  if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)
    theta_g = ppw->rsa_theta_g;

  /** - ---> like Ma & Bertschinger */
  if (ppr->tight_coupling_approximation == (int)first_order_MB) {

    slip=2.*R/(1.+R)*a_prime_over_a*(theta_b-theta_g)
      +F*(-a_primeprime_over_a*theta_b
          +k2*(-a_prime_over_a*delta_g/2.
               +cb2*(-theta_b-metric_continuity)
               -4./3.*(-theta_g-metric_continuity)/4.)
          -a_prime_over_a*metric_euler);
  }

  /** - ---> relax assumption dkappa~a\f$^{-2}\f$ (like in CAMB) */
  if ((ppr->tight_coupling_approximation == (int)first_order_CAMB)
      || (ppr->tight_coupling_approximation == (int)compromise_CLASS)) {

    slip=(dtau_c/tau_c-2.*a_prime_over_a/(1.+R))*(theta_b-theta_g)
      +F*(-a_primeprime_over_a*theta_b
          +k2*(-a_prime_over_a*delta_g/2.
               +cb2*(-theta_b-metric_continuity)
               -4./3.*(-theta_g-metric_continuity)/4.)
          -a_prime_over_a*metric_euler);
  }

  /** - ---> also relax assumption cb2~a\f$^{-1}\f$ */
  if ((ppr->tight_coupling_approximation == (int)first_order_CLASS)
      || (ppr->tight_coupling_approximation == (int)second_order_CLASS)){

    slip=(dtau_c/tau_c-2.*a_prime_over_a/(1.+R))*(theta_b-theta_g)
      +F*(-a_primeprime_over_a*theta_b
          +k2*(-a_prime_over_a*delta_g/2.
               +pvecthermo[pth->index_th_dcb2]*delta_b
               +cb2*(-theta_b-metric_continuity)
               -4./3.*(-theta_g-metric_continuity)/4.)
          -a_prime_over_a*metric_euler);
  }

  /** - ---> intermediate quantities for 2nd order tca: shear_g at first order in tight-coupling */
  shear_g=16./45.*tau_c*(theta_g+metric_shear);
  /* (Ma & Bertschinger give (1/9)*(4/3) instead of (2/15)*(4/3) because
     they didn't include the contribution of G_gamma0 and G_gamma2, which
     are of the same order as sigma_g. This was already consistently
     included in CAMB) */

  /** - ---> intermediate quantities for 2nd order tca: zero order for theta_b' = theta_g' */
  /** - ----> perturbed recombination has an impact **/
  theta_prime = (-a_prime_over_a*theta_b+k2*(cb2*(delta_b+delta_temp)+R/4.*delta_g))/(1.+R) + metric_euler;

  /** - ---> intermediate quantities for 2nd order tca: shear_g_prime at first order in tight-coupling */
  shear_g_prime=16./45.*(tau_c*(theta_prime+metric_shear_prime)+dtau_c*(theta_g+metric_shear));

  /** - ---> 2nd order as in CRS*/
  if (ppr->tight_coupling_approximation == (int)second_order_CRS) {

    if (ppt->gauge == newtonian) {
      class_stop(error_message,
                 "the second_order_CRS approach to tight-coupling is coded in synchronous gauge, not newtonian: change gauge or try another tight-coupling scheme");
    }

    if (ppt->gauge == synchronous) {

      class_test(pba->sgnK != 0,
                 ppt->error_message,
                 "the second_order_CRS approach to tight-coupling is coded in the flat case only: for non-flat try another tight-coupling scheme");

      /* infer Delta from h'' using Einstein equation */
      Delta = 2*k2*y[pv->index_pt_eta]
        -2*a_prime_over_a*pvecmetric[ppw->index_mt_h_prime]
        -pvecmetric[ppw->index_mt_h_prime_prime];

      /* monster expression for slip at second-order in tight-coupling */
      slip=(-2./(1.+R)*a_prime_over_a-pvecthermo[pth->index_th_ddkappa]/pvecthermo[pth->index_th_dkappa])*(theta_b-theta_g)
        +(-a_primeprime_over_a*theta_b
          -k2*a_prime_over_a*(delta_g/2.-2.*shear_g)
          +k2*(cb2*(-theta_b-metric_continuity)
               -4./3.*(-theta_g-metric_continuity)/4.
               +shear_g_prime)
          )/pvecthermo[pth->index_th_dkappa]/(1.+R)
        -2.*R*(3.*a_prime_over_a*a_prime_over_a*cb2+(1.+R)*(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)-3.*a_prime_over_a*a_prime_over_a)
        /(1.+R)/(1.+R)/(1.+R)*(theta_b-theta_g)/pvecthermo[pth->index_th_dkappa]
        +( a_primeprime_over_a*a_prime_over_a*((2.-3.*cb2)*R-2.)*theta_b/(1.+R)
           +a_prime_over_a*k2*(1.-3.*cb2)*theta_b/3./(1.+R)
           /* perturbed recombination has an impact (next two lines) */
           +a_primeprime_over_a*k2*cb2*(delta_b+delta_temp)/(1.+R)
           +k2*k2*(3.*cb2-1.)*cb2*(delta_b+delta_temp)/3./(1.+R)
           +k2*k2*R*(3.*cb2-1.)*delta_g/12./(1.+R)
           +a_primeprime_over_a*k2*(2.+3.*R)*delta_g/4./(1.+R)
           +a_prime_over_a*a_prime_over_a*k2*((2.-3.*cb2)*R-1.)*delta_g/2./(1.+R)
           +a_prime_over_a*k2*cb2*(1.+(3.*cb2-2.)*R)*(-theta_b-metric_continuity)/(1.+R)
           +a_prime_over_a*k2*(2.+(5.-3.*cb2)*R)*4./3.*(-theta_g-metric_continuity)/4./(1.+R)
           +a_prime_over_a*(1.-3.*cb2)*k2*2.*metric_shear/3.
           +k2*k2*(3.*cb2-1.)*y[pv->index_pt_eta]/3.
           +2.*a_prime_over_a*k2*(3.*cb2-1.)*pvecmetric[ppw->index_mt_eta_prime]
           +k2*(1.-3.*cb2)*Delta/6.
           )/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]/(1.+R)/(1.+R)
        -(4.*a_primeprime_over_a*theta_b-4.*k2*cb2*(-theta_b-metric_continuity)+2.*a_prime_over_a*k2*delta_g+k2*4./3.*(-theta_g-metric_continuity))/2./(1.+R)/(1.+R)*pvecthermo[pth->index_th_ddkappa]/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]
        +4.*a_prime_over_a*R/(1.+R)/(1.+R)*pvecthermo[pth->index_th_ddkappa]/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]*(theta_b-theta_g);

      /* second-order correction to shear */
      shear_g = (1.-11./6.*dtau_c)*shear_g-11./6.*tau_c*16./45.*tau_c*(theta_prime+k2*pvecmetric[ppw->index_mt_alpha_prime]);

    }
  }

  /** - ---> 2nd order like in CLASS paper */
  if (ppr->tight_coupling_approximation == (int)second_order_CLASS) {

    if (ppt->gauge == newtonian) {
      class_stop(error_message,
                 "the second_order_CLASS approach to tight-coupling is coded in synchronous gauge, not newtonian: change gauge or try another tight-coupling scheme");
    }

    if (ppt->gauge == synchronous) {

      /* zero order for theta_b'' = theta_g'' */
      theta_prime_prime = ((R-1.)*a_prime_over_a*theta_prime-(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)*theta_b
                           +k2*(pvecthermo[pth->index_th_dcb2]*delta_b+cb2*(-theta_b-metric_continuity)-a_prime_over_a*R/4.*delta_g+R/4.*4./3.*(-theta_g-metric_continuity)))/(1.+R);

      /* zero-order quantities g0, g0', go'' */
      g0 = -a_prime_over_a*theta_b + k2*(cb2*delta_b-delta_g/4.);
      g0_prime = -a_prime_over_a*theta_prime-(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)*theta_b+k2*(pvecthermo[pth->index_th_dcb2]*delta_b+(1./3.-cb2)*(theta_b+0.5*pvecmetric[ppw->index_mt_h_prime]));
      g0_prime_prime = -a_prime_over_a*theta_prime_prime-2.*(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)*theta_prime
        -(2.*a_prime_over_a*a_prime_over_a*a_prime_over_a-3.*a_primeprime_over_a*a_prime_over_a)*theta_b
        +k2*(pvecthermo[pth->index_th_ddcb2]*delta_b-2.*pvecthermo[pth->index_th_dcb2]*(theta_b+0.5*pvecmetric[ppw->index_mt_h_prime])+(1./3.-cb2)*(theta_prime+0.5*pvecmetric[ppw->index_mt_h_prime_prime]));

      /* slip at second order */
      slip = (1.-2*a_prime_over_a*F)*slip
        + F*k2*s2_squared*(2.*a_prime_over_a*shear_g+shear_g_prime)
        -F*(F_prime_prime*g0+2.*F_prime*g0_prime+F*g0_prime_prime);

      /* second-order correction to shear */
      shear_g = (1.-11./6.*dtau_c)*shear_g-11./6.*tau_c*16./45.*tau_c*(theta_prime+metric_shear_prime);

    }
  }

  /** - ---> add only the most important 2nd order terms */
  if (ppr->tight_coupling_approximation == (int)compromise_CLASS) {

    /* slip at second order (only leading second-order terms) */
    slip = (1.-2.*a_prime_over_a*F)*slip
      + F*k2*(2.*a_prime_over_a*s2_squared*shear_g+s2_squared*shear_g_prime-(1./3.-cb2)*(F*theta_prime+2.*F_prime*theta_b));

    /* second-order correction to shear */
    shear_g = (1.-11./6.*dtau_c)*shear_g-11./6.*tau_c*16./45.*tau_c*(theta_prime+metric_shear_prime);
  }

  /** - ---> store tight-coupling values of photon shear and its derivative */
  ppw->tca_shear_g = shear_g;
  ppw->tca_slip = slip;

  return _SUCCESS_;
}

/**
 * Evaluate the radiation streaming approximation (RSA): set the photon
 * (and, if present, ultra-relativistic species) density and velocity
 * perturbations in ppw->rsa_delta_g, ppw->rsa_theta_g, ppw->rsa_delta_ur,
 * ppw->rsa_theta_ur, according to the gauge and to
 * ppr->radiation_streaming_approximation.
 *
 * @param ppr            Input: pointer to precision structure
 * @param pba            Input: pointer to background structure
 * @param pth            Input: pointer to thermodynamics structure
 * @param ppt            Input: pointer to perturbation structure
 * @param k              Input: wavenumber
 * @param y              Input: vector of perturbations
 * @param a_prime_over_a Input: a'/a
 * @param pvecthermo     Input: vector of thermodynamics quantities
 * @param ppw            Input/Output: workspace where results are stored
 * @return the error status
 */

int perturb_rsa_delta_and_theta(
                                struct precision * ppr,
                                struct background * pba,
                                struct thermo * pth,
                                struct perturbs * ppt,
                                double k,
                                double * y,
                                double a_prime_over_a,
                                double * pvecthermo,
                                struct perturb_workspace * ppw
                                ) {

  /* - define local variables */
  double k2;
  k2 = k*k;

  // formulas below TBC for curvaturema

  /* newtonian gauge */
  if (ppt->gauge == newtonian) {

    if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {

      if (ppr->radiation_streaming_approximation == rsa_null) {
        ppw->rsa_delta_g = 0.;
        ppw->rsa_theta_g = 0.;
      }
      else {
        ppw->rsa_delta_g = -4.*y[ppw->pv->index_pt_phi];
        ppw->rsa_theta_g = 6.*ppw->pvecmetric[ppw->index_mt_phi_prime];
      }

      /* extra source terms from residual scattering during/after reionization */
      if (ppr->radiation_streaming_approximation == rsa_MD_with_reio) {

        ppw->rsa_delta_g += -4./k2*ppw->pvecthermo[pth->index_th_dkappa]*y[ppw->pv->index_pt_theta_b];

        ppw->rsa_theta_g += 3./k2*(ppw->pvecthermo[pth->index_th_ddkappa]*y[ppw->pv->index_pt_theta_b]
                                   +ppw->pvecthermo[pth->index_th_dkappa]*
                                   (-a_prime_over_a*y[ppw->pv->index_pt_theta_b]
                                    +ppw->pvecthermo[pth->index_th_cb2]*k2*y[ppw->pv->index_pt_delta_b]
                                    +k2*y[ppw->pv->index_pt_phi]));
      }

      if (pba->has_ur == _TRUE_) {
        if (ppr->radiation_streaming_approximation == rsa_null) {
          ppw->rsa_delta_ur = 0.;
          ppw->rsa_theta_ur = 0.;
        }
        else {
          ppw->rsa_delta_ur = -4.*y[ppw->pv->index_pt_phi];
          ppw->rsa_theta_ur = 6.*ppw->pvecmetric[ppw->index_mt_phi_prime];
        }
      }
    }
  }

  /* synchronous gauge */
  if (ppt->gauge == synchronous) {

    if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {

      if (ppr->radiation_streaming_approximation == rsa_null) {
        ppw->rsa_delta_g = 0.;
        ppw->rsa_theta_g = 0.;
      }
      else {
        ppw->rsa_delta_g = 4./k2*(a_prime_over_a*ppw->pvecmetric[ppw->index_mt_h_prime]
                                  -k2*y[ppw->pv->index_pt_eta]);
        ppw->rsa_theta_g = -0.5*ppw->pvecmetric[ppw->index_mt_h_prime];
      }

      /* extra source terms from residual scattering during/after reionization */
      if (ppr->radiation_streaming_approximation == rsa_MD_with_reio) {

        ppw->rsa_delta_g += -4./k2*ppw->pvecthermo[pth->index_th_dkappa]*(y[ppw->pv->index_pt_theta_b]+0.5*ppw->pvecmetric[ppw->index_mt_h_prime]);

        ppw->rsa_theta_g += 3./k2*(ppw->pvecthermo[pth->index_th_ddkappa]*
                                   (y[ppw->pv->index_pt_theta_b]
                                    +0.5*ppw->pvecmetric[ppw->index_mt_h_prime])
                                   +ppw->pvecthermo[pth->index_th_dkappa]*
                                   (-a_prime_over_a*y[ppw->pv->index_pt_theta_b]
                                    + ppw->pvecthermo[pth->index_th_cb2]*k2*y[ppw->pv->index_pt_delta_b]
                                    -a_prime_over_a*ppw->pvecmetric[ppw->index_mt_h_prime]
                                    +k2*y[ppw->pv->index_pt_eta]));
      }

      if (pba->has_ur == _TRUE_) {
        if (ppr->radiation_streaming_approximation == rsa_null) {
          ppw->rsa_delta_ur = 0.;
          ppw->rsa_theta_ur = 0.;
        }
        else {
          ppw->rsa_delta_ur = 4./k2*(a_prime_over_a*ppw->pvecmetric[ppw->index_mt_h_prime]
                                     -k2*y[ppw->pv->index_pt_eta]);
          ppw->rsa_theta_ur = -0.5*ppw->pvecmetric[ppw->index_mt_h_prime];
        }
      }
    }
  }

  return _SUCCESS_;
}
/* ==== file: GB_unop__isnan_bool_fc64.c ==== */
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB (_unop_apply__isnan_bool_fc64)
// op(A') function: GB (_unop_tran__isnan_bool_fc64)

// C type:   bool
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = (aij)
// unaryop:  cij = GB_cisnan (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_cisnan (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = (aij) ; \
    Cx [pC] = GB_cisnan (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISNAN || GxB_NO_BOOL || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__isnan_bool_fc64)
(
    bool *Cx,                   // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry of Ax is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = (aij) ;
            Cx [p] = GB_cisnan (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = (aij) ;
            Cx [p] = GB_cisnan (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__isnan_bool_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body provided by the shared transpose template, driven by the macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ==== file: pairwise_transform.h ==== */
/* * pairwise_transform.h * * Created on: Dec 28, 2015 * Author: agibsonccc */ #ifndef PAIRWISE_TRANSFORM_H_ #define PAIRWISE_TRANSFORM_H_ #ifdef __JNI__ #include <jni.h> #endif #include <op.h> #include <omp.h> #include <templatemath.h> #include <helper_cuda.h> #include <shape.h> #include <pairwise_util.h> #include <dll.h> #include <stdio.h> #ifdef __CUDACC__ #include <cuda.h> #include <cuda_runtime.h> #endif namespace functions { namespace pairwise_transforms { #define MIN 1e-12 /** * Transforms involving 2 arrays */ template<typename T> class PairWiseTransform : public virtual functions::ops::Op<T> { protected: bool requiresSpecial = false; public: virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T d2, T *params) = 0; virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T *params) = 0; #ifdef __CUDACC__ /** * */ virtual __inline__ __device__ void transform( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams, Nd4jIndex n, int *indexes,int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) { transform(dx, xShapeBuffer, y, yShapeBuffer, result, resultShapeBuffer, extraParams, indexes, indexes, indexes, allocationPointer, manager, tadOnlyShapeInfo); } /** * */ virtual __inline__ __device__ void transform( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams, int *indexes, int *yIndexes, int *resultIndexes,int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) { int tid = blockIdx.x * blockDim.x + threadIdx.x; Nd4jIndex n = shape::length(xShapeBuffer); for (int i = tid; i < n; i += gridDim.x * blockDim.x) { result[resultIndexes[i]] = op(dx[indexes[i]],y[yIndexes[i]], extraParams); } } /** * */ virtual __inline__ __device__ void transform( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams, int 
*indexes, int *yIndexes,int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) { transform(dx, xShapeBuffer, y, yShapeBuffer, result, resultShapeBuffer, extraParams, indexes, yIndexes, indexes, allocationPointer, manager, tadOnlyShapeInfo); } /** * */ virtual __inline__ __device__ void transformCuda( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams, int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int xRank = shape::rank(xShapeBuffer); int yRank = shape::rank(yShapeBuffer); int resultRank = shape::rank(resultShapeBuffer); Nd4jIndex n = shape::length(xShapeBuffer); if(shape::elementWiseStride(xShapeBuffer) >= 1 && shape::elementWiseStride(yShapeBuffer) >= 1 && shape::elementWiseStride(resultShapeBuffer) >= 1 && shape::order(xShapeBuffer) == shape::order(yShapeBuffer) && shape::order(resultShapeBuffer) == shape::order(xShapeBuffer)) { // TODO: this is wrong, and should be moved to host side transformCuda( n, dx, y, shape::elementWiseStride(xShapeBuffer), shape::elementWiseStride(yShapeBuffer), extraParams, result, shape::elementWiseStride(resultShapeBuffer), allocationPointer, manager, tadOnlyShapeInfo); } else { /* long allocSize = sizeof(int) * (xRank + yRank + resultRank); int *tB = shape::cuMalloc(manager->getT1ShapeBuffer(), allocSize); int *xCoord = tB; int *yCoord = tB + xRank; int *resultCoord = yCoord + yRank; */ int xCoord[MAX_RANK]; int yCoord[MAX_RANK]; if (dx == result) { for (int i = tid; i < n; i += gridDim.x * blockDim.x) { shape::ind2subC(xRank,shape::shapeOf(xShapeBuffer), i, xCoord); shape::ind2subC(yRank,shape::shapeOf(yShapeBuffer), i, yCoord); Nd4jIndex xOffset = shape::getOffset(0, shape::shapeOf(xShapeBuffer), shape::stride(xShapeBuffer), xCoord, xRank); Nd4jIndex yOffset = shape::getOffset(0, shape::shapeOf(yShapeBuffer), shape::stride(yShapeBuffer), yCoord, yRank); result[xOffset] = 
op(dx[xOffset], y[yOffset], extraParams); } } else { int resultCoord[MAX_RANK]; for (int i = tid; i < n; i += gridDim.x * blockDim.x) { shape::ind2subC(xRank,shape::shapeOf(xShapeBuffer), i, xCoord); shape::ind2subC(yRank,shape::shapeOf(yShapeBuffer), i, yCoord); shape::ind2subC(resultRank,shape::shapeOf(resultShapeBuffer), i, resultCoord); Nd4jIndex xOffset = shape::getOffset(0, shape::shapeOf(xShapeBuffer), shape::stride(xShapeBuffer), xCoord, xRank); Nd4jIndex yOffset = shape::getOffset(0, shape::shapeOf(yShapeBuffer), shape::stride(yShapeBuffer), yCoord, yRank); Nd4jIndex resultOffset = shape::getOffset(0, shape::shapeOf(resultShapeBuffer), shape::stride(resultShapeBuffer), resultCoord, resultRank); result[resultOffset] = op(dx[xOffset], y[yOffset], extraParams); } } } } /** * * @param n * @param xOffset * @param yOffset * @param resultOffset * @param dx * @param dy * @param incx * @param incy * @param params * @param result * @param incz * @param blockSize */ virtual __inline__ __device__ void transformCuda( Nd4jIndex n, T *dx, T *dy, int incx, int incy, T *params, T *result, int incz,int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (incy == 0) { #pragma unroll for (int i = tid; i < n; i+= gridDim.x * blockDim.x) { result[i * incz] = op(dx[i * incx], params); } } else if ((incx == incy) && (incx > 0)) { /* equal, positive, increments */ if (incx == 1) { /* both increments equal to 1 */ #pragma unroll for (int i = tid; i < n; i += gridDim.x * blockDim.x) { result[i * incz] = op(dx[i * incx], dy[i * incy], params); } } else { /* equal, positive, non-unit increments. 
*/ #pragma unroll for (int i = tid; i < n; i += gridDim.x * blockDim.x) { result[i * incz] = op(dx[i * incx], dy[i * incy], params); } } } else { /* unequal or nonpositive increments */ #pragma unroll for (int i = tid; i < n; i += gridDim.x * blockDim.x) { result[i * incz] = op(dx[i * incx], dy[i * incy], params); } } } #endif public: /** * CPU operation execution * @param dx the input data * @param xStride the stride to iterate over * the x input * @param y the y data * @param yStride the stride to iterate * over the y buffer * @param result the buffer * to store the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform * @param n the length of the input */ virtual void exec( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams, int *indexes, int *yIndexes) { exec(dx, xShapeBuffer, y, yShapeBuffer, result, resultShapeBuffer, extraParams, indexes, yIndexes, indexes); } /** * CPU operation execution * @param dx the input data * @param xStride the stride to iterate over * the x input * @param y the y data * @param yStride the stride to iterate * over the y buffer * @param result the buffer * to store the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform * @param n the length of the input */ virtual void exec( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams, int *indexes, int *yIndexes, int *resultIndexes) { Nd4jIndex n = shape::length(xShapeBuffer); #pragma omp parallel for simd schedule(guided) for (Nd4jIndex i = 0; i < n; i++) { result[resultIndexes[i]] = op(dx[indexes[i]], y[yIndexes[i]], extraParams); } } /** * CPU operation execution * @param dx the input data * @param xStride the stride to iterate over * the x input * @param y the y data * @param yStride the stride to iterate * over the y buffer * @param result the buffer * to store 
the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform * @param indexes which indexes to copy */ virtual void exec( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams, int *indexes) { Nd4jIndex n = shape::length(xShapeBuffer); #pragma omp parallel for simd schedule(guided) for (Nd4jIndex i = 0; i < n; i++) { result[indexes[i]] = op(dx[indexes[i]],y[indexes[i]], extraParams); } } /** * CPU operation execution * @param dx the input data * @param xStride the stride to iterate over * the x input * @param y the y data * @param yStride the stride to iterate * over the y buffer * @param result the buffer * to store the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform */ virtual void execSpecial( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams) = 0; /** * CPU operation execution * @param dx the input data * @param xStride the stride to iterate over * the x input * @param y the y data * @param yStride the stride to iterate * over the y buffer * @param result the buffer * to store the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform */ virtual void exec( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams) { Nd4jIndex n = shape::length(xShapeBuffer); int xElementWiseStride = shape::elementWiseStride(xShapeBuffer); int yElementWiseStride = shape::elementWiseStride(yShapeBuffer); int resultElementWiseStride = shape::elementWiseStride(resultShapeBuffer); bool sameShape = shape::shapeEquals(shape::rank(xShapeBuffer), shape::shapeOf(xShapeBuffer), shape::rank(yShapeBuffer), shape::shapeOf(yShapeBuffer)); //ignore everything else if (this->requiresSpecial) { this->execSpecial(dx, xShapeBuffer, y, yShapeBuffer, result, 
resultShapeBuffer, extraParams); return; } if (xElementWiseStride >= 1 && yElementWiseStride >= 1 && resultElementWiseStride >= 1 && shape::order(xShapeBuffer) == shape::order(yShapeBuffer) && shape::order(resultShapeBuffer) == shape::order(xShapeBuffer) && sameShape && xElementWiseStride == yElementWiseStride) { exec(dx, xElementWiseStride, y, yElementWiseStride, result, resultElementWiseStride, extraParams, n); } //not same shape else if (!sameShape && shape::order(xShapeBuffer) == shape::order(yShapeBuffer) && shape::order(resultShapeBuffer) == shape::order(xShapeBuffer) && xElementWiseStride >= 1 && yElementWiseStride >= 1 && resultElementWiseStride >= 1 && xElementWiseStride == yElementWiseStride) { exec(dx, xElementWiseStride, y, yElementWiseStride, result, resultElementWiseStride, extraParams, shape::length(yShapeBuffer)); } else if (sameShape) { int rank = shape::rank(xShapeBuffer); int *xShape = shape::shapeOf(xShapeBuffer); int *xStride = shape::stride(xShapeBuffer); int *yStride = shape::stride(yShapeBuffer); int *resultStride = shape::stride(resultShapeBuffer); int shapeIter[MAX_RANK]; int coord[MAX_RANK]; int dim; int xStridesIter[MAX_RANK]; int yStridesIter[MAX_RANK]; int resultStridesIter[MAX_RANK]; if (PrepareThreeRawArrayIter<T>(rank, xShape, dx, xStride, y, yStride, result, resultStride, rank, shapeIter, &dx, xStridesIter, &y, yStridesIter, &result, resultStridesIter) >= 0) { ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); { /* Process the innermost dimension */ T *xIter = dx; T *yIter = y; T *resultIter = result; resultIter[0] = op(xIter[0], yIter[0], extraParams); } ND4J_RAW_ITER_THREE_NEXT(dim, rank, coord, shapeIter, dx, xStridesIter, y, yStridesIter, result, resultStridesIter); } else { printf("Unable to prepare array\n"); } } else { Nd4jIndex len = shape::length(xShapeBuffer); int xRank = shape::rank(xShapeBuffer); int yRank = shape::rank(yShapeBuffer); int resultRank = shape::rank(resultShapeBuffer); int *xCoord = new int[xRank]; int 
*yCoord = new int[yRank]; int *resultCoord = new int[resultRank]; int *xShape = shape::shapeOf(xShapeBuffer); int *xStride = shape::stride(xShapeBuffer); int *yShape = shape::shapeOf(yShapeBuffer); int *yStride = shape::stride(yShapeBuffer); int *resultShape = shape::shapeOf(resultShapeBuffer); if(dx == result) { for (Nd4jIndex i = 0; i < len; i++) { shape::ind2subC(xRank,xShape, i, xCoord); shape::ind2subC(yRank,yShape, i, yCoord); shape::ind2subC(resultRank,resultShape, i, resultCoord); Nd4jIndex xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank); Nd4jIndex yOffset = shape::getOffset(0, yShape, yStride, yCoord, yRank); result[xOffset] = op(dx[xOffset], y[yOffset], extraParams); } } else { for (Nd4jIndex i = 0; i < len; i++) { shape::ind2subC(xRank,xShape, i, xCoord); shape::ind2subC(yRank,yShape, i, yCoord); shape::ind2subC(resultRank,resultShape, i, resultCoord); Nd4jIndex xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank); Nd4jIndex yOffset = shape::getOffset(0, yShape, yStride, yCoord, yRank); Nd4jIndex resultOffset = shape::getOffset(0, resultShape, resultShape, resultCoord, resultRank); result[resultOffset] = op(dx[xOffset], y[yOffset], extraParams); } } delete[] xCoord; delete[] yCoord; delete []resultCoord; } } /** * CPU operation execution * @param dx the input data * @param xStride the stride to iterate over * the x input * @param y the y data * @param yStride the stride to iterate * over the y buffer * @param result the buffer * to store the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform * @param n the length of the input */ virtual void exec(T *dx, Nd4jIndex xStride, T *y, Nd4jIndex yStride, T *result, Nd4jIndex resultStride, T *extraParams, Nd4jIndex n) { if (xStride == 1 && yStride == 1 && resultStride == 1) { if(n < 8000) { #pragma omp simd for (Nd4jIndex i = 0; i < n; i++) { result[i] = op(dx[i], y[i], extraParams); } } else { #pragma omp parallel for simd 
schedule(guided) for (Nd4jIndex i = 0; i < n; i++) { result[i] = op(dx[i], y[i], extraParams); } } } else { if(n < 8000) { #pragma omp simd for (Nd4jIndex i = 0; i < n; i++) { result[i * resultStride] = op(dx[i * xStride], y[i * yStride], extraParams); } } else { #pragma omp parallel for simd schedule(guided) for (Nd4jIndex i = 0; i < n; i++) { result[i * resultStride] = op(dx[i * xStride], y[i * yStride], extraParams); } } } } virtual inline #ifdef __CUDACC__ __host__ __device__ #endif void aggregateExtraParams(T **extraParamsTotal,T **extraParamsLocal) { //no extra params aggregation needs to happen } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif virtual ~PairWiseTransform() { } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif PairWiseTransform() { } }; namespace ops { /** * x + y */ template<typename T> class Add: public virtual PairWiseTransform<T> { public: /** * CPU operation execution * @param dx the input data * @param xStride the stride to iterate over * the x input * @param y the y data * @param yStride the stride to iterate * over the y buffer * @param result the buffer * to store the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform * @param n the length of the input */ virtual void execSpecial( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams) { //no-op } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T d2, T *params) { return d1 + d2; } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T *params) { return d1; } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif virtual ~Add() { } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif Add() { } }; /** * Copy y to x */ template<typename T> class Copy: public virtual PairWiseTransform<T> { 
public: /** * CPU operation execution * @param dx the input data * @param xStride the stride to iterate over * the x input * @param y the y data * @param yStride the stride to iterate * over the y buffer * @param result the buffer * to store the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform * @param n the length of the input */ virtual void execSpecial( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams) {//no-op } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T d2, T *params) { (void)d1; (void)params; return d2; } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T *params) { (void)params; return d1; } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif virtual ~Copy() { } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif Copy() { } }; /** * Divide x / y */ template<typename T> class Divide: public virtual PairWiseTransform<T> { public: /** * CPU operation execution * @param dx the input data * @param xStride the stride to iterate over * the x inputCopy * @param y the y data * @param yStride the stride to iterate * over the y buffer * @param result the buffer * to store the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform * @param n the length of the input */ virtual void execSpecial( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams) {//no-op } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T d2, T *params) { return d1 / d2; } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T *params) { return d1; } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif virtual ~Divide() 
{ } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif Divide() { } }; /** *Set x to y */ template<typename T> class Set: public virtual PairWiseTransform<T> { public: /** * CPU operation execution * @param dx the input data * @param xStride the stride to iterate over * the x input * @param y the y data * @param yStride the stride to iterate * over the y buffer * @param result the buffer * to store the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform * @param n the length of the input */ virtual void execSpecial( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams) {//no-op } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T d2, T *params) { return d2; } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T *params) { return d1; } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif virtual ~Set() { } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif Set() { } }; /** * Whether 2 elements in an array * are epsilion equal */ template<typename T> class Epsilon: public virtual PairWiseTransform<T> { public: /** * CPU operation execution * @param dx the input data * @param xStride the stride to iterate over * the x input * @param y the y data * @param yStride the stride to iterate * over the y buffer * @param result the buffer * to store the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform * @param n the length of the input */ virtual void execSpecial( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams) {//no-op } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T d2, T *params) { T diff = d1 - d2; T absDiff = 
nd4j::math::nd4j_abs(diff); if (absDiff < MIN) return 1; return 0; } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T *params) { return d1; } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif virtual ~Epsilon() { } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif Epsilon() { } }; /** * x == y (binary result) */ template<typename T> class EqualTo: public virtual PairWiseTransform<T> { public: /** * CPU operation execution * @param dx the input data * @param xStride the stride to iterate over * the x input * @param y the y data * @param yStride the stride to iterate * over the y buffer * @param result the buffer * to store the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform * @param n the length of the input */ virtual void execSpecial( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams) {//no-op } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T d2, T *params) { return d1 == d2; } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T *params) { return d1; } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif virtual ~EqualTo() { } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif EqualTo() { } }; /** * x == y (binary result) */ template<typename T> class NotEqualTo: public virtual PairWiseTransform<T> { public: /** * CPU operation execution * @param dx the input data * @param xStride the stride to iterate over * the x input * @param y the y data * @param yStride the stride to iterate * over the y buffer * @param result the buffer * to store the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform * @param n the length of the input */ virtual void 
execSpecial( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams) {//no-op } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T d2, T *params) { return d1 != d2; } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T *params) { return d1; } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif virtual ~NotEqualTo() { } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif NotEqualTo() { } }; /** * Whether x > y */ template<typename T> class GreaterThanOrEqual: public virtual PairWiseTransform<T> { public: /** * CPU operation execution * @param dx the input data * @param xStride the stride to iterate over * the x input * @param y the y data * @param yStride the stride to iterate * over the y buffer * @param result the buffer * to store the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform * @param n the length of the input */ virtual void execSpecial( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams) {//no-op } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T d2, T *params) { return d1 >= d2; } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T *params) { return d1; } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif virtual ~GreaterThanOrEqual() { } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif GreaterThanOrEqual() { } }; /** * Whether x > y */ template<typename T> class GreaterThan: public virtual PairWiseTransform<T> { public: /** * CPU operation execution * @param dx the input data * @param xStride the stride to iterate over * the x input * @param y the y data * @param yStride the stride to iterate * over the y buffer 
* @param result the buffer * to store the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform * @param n the length of the input */ virtual void execSpecial( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams) {//no-op } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T d2, T *params) { return d1 > d2; } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T *params) { return d1; } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif virtual ~GreaterThan() { } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif GreaterThan() { } }; /** * Whether x < y */ template<typename T> class LessThan: public virtual PairWiseTransform<T> { public: /** * CPU operation execution * @param dx the input data * @param xStride the stride to iterate over * the x input * @param y the y data * @param yStride the stride to iterate * over the y buffer * @param result the buffer * to store the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform * @param n the length of the input */ virtual void execSpecial( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams) {//no-op } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T d2, T *params) { return d1 < d2; } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T *params) { return d1; } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif virtual ~LessThan() { } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif LessThan() { } }; /** * Whether x < y */ template<typename T> class LessThanOrEqual: public virtual PairWiseTransform<T> { public: /** * CPU 
operation execution * @param dx the input data * @param xStride the stride to iterate over * the x input * @param y the y data * @param yStride the stride to iterate * over the y buffer * @param result the buffer * to store the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform * @param n the length of the input */ virtual void execSpecial( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams) {//no-op } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T d2, T *params) { return d1 <= d2; } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T *params) { return d1; } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif virtual ~LessThanOrEqual() { } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif LessThanOrEqual() { } }; /** * x * y */ template<typename T> class Multiply: public virtual PairWiseTransform<T> { public: /** * CPU operation execution * @param dx the input data * @param xStride the stride to iterate over * the x input * @param y the y data * @param yStride the stride to iterate * over the y buffer * @param result the buffer * to store the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform * @param n the length of the input */ virtual void execSpecial( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams) {//no-op } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T d2, T *params) { return d1 * d2; } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T *params) { return d1; } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif virtual ~Multiply() { } #ifdef __CUDACC__ inline 
__host__ __device__ #elif defined(__GNUC__) #endif Multiply() { } }; /** * y / x */ template<typename T> class ReverseDivide: public virtual PairWiseTransform<T> { public: /** * CPU operation execution * @param dx the input data * @param xStride the stride to iterate over * the x input * @param y the y data * @param yStride the stride to iterate * over the y buffer * @param result the buffer * to store the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform * @param n the length of the input */ virtual void execSpecial( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams) {//no-op } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T d2, T *params) { return d2 / d1; } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T *params) { return d1; } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif virtual ~ReverseDivide() { } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif ReverseDivide() { } }; /** * y - x */ template<typename T> class ReverseSubtraction: public virtual PairWiseTransform<T> { public: /** * CPU operation execution * @param dx the input data * @param xStride the stride to iterate over * the x input * @param y the y data * @param yStride the stride to iterate * over the y buffer * @param result the buffer * to store the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform * @param n the length of the input */ virtual void execSpecial( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams) {//no-op } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T d2, T *params) { return d2 - d1; } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif 
defined(__GNUC__) #endif T op(T d1, T *params) { return d1; } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif virtual ~ReverseSubtraction() { } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif ReverseSubtraction() { } }; /** * x - y */ template<typename T> class Subtract: public virtual PairWiseTransform<T> { public: /** * CPU operation execution * @param dx the input data * @param xStride the stride to iterate over * the x input * @param y the y data * @param yStride the stride to iterate * over the y buffer * @param result the buffer * to store the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform * @param n the length of the input */ virtual void execSpecial( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams) {//no-op } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T d2, T *params) { return d1 - d2; } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T *params) { return d1; } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif virtual ~Subtract() { } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif Subtract() { } }; /** * x - y */ template<typename T> class Max: public virtual PairWiseTransform<T> { public: /** * CPU operation execution * @param dx the input data * @param xStride the stride to iterate over * the x input * @param y the y data * @param yStride the stride to iterate * over the y buffer * @param result the buffer * to store the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform * @param n the length of the input */ virtual void execSpecial( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams) {//no-op } virtual #ifdef 
__CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T d2, T *params) { return nd4j::math::nd4j_max<T>(d1,d2); } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T *params) { return d1; } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif virtual ~Max() { } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif Max() { } }; /** * x - y */ template<typename T> class Min: public virtual PairWiseTransform<T> { public: /** * CPU operation execution * @param dx the input data * @param xStride the stride to iterate over * the x input * @param y the y data * @param yStride the stride to iterate * over the y buffer * @param result the buffer * to store the result in * @param resultStride the stride for the buffer * @param extraParams the extra parameters for the transform * @param n the length of the input */ virtual void execSpecial( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams) {//no-op } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T d2, T *params) { return nd4j::math::nd4j_min(d1,d2); } virtual #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif T op(T d1, T *params) { return d1; } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif virtual ~Min() { } #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif Min() { } }; } /** * Creates pair wise operations. 
*/
template<typename T>
class PairWiseTransformOpFactory {
public:
#ifdef __CUDACC__
    __host__ __device__
#endif
    PairWiseTransformOpFactory() {
    }

    /**
     * Create the pair-wise operation instance for the given op number.
     *
     * Op numbers:
     *  0: Add                  9: Subtract
     *  1: Copy                10: Epsilon
     *  2: Divide              11: GreaterThanOrEqual
     *  3: EqualTo             12: LessThanOrEqual
     *  4: GreaterThan         13: Max
     *  5: LessThan            14: Min
     *  6: Multiply            15: NotEqualTo
     *  7: ReverseDivide       16: Set
     *  8: ReverseSubtraction
     *
     * @param op the op number (see table above)
     * @param buffer (CUDA build only) caller-provided scratch storage the op is
     *        placement-new'ed into; the caller owns that storage
     * @return the operation, or nullptr for an unknown op number.
     *         On the host build the op is heap-allocated; ownership passes to
     *         the caller (who must delete it).
     */
#ifdef __CUDACC__
    __inline__ __device__
    PairWiseTransform<T> * getOp(int op, unsigned char *buffer) {
#else
    PairWiseTransform<T> * getOp(int op) {
#endif
        if (op == 0)
#ifdef __CUDACC__
            return new(buffer) pairwise_transforms::ops::Add<T>();
#else
            return new pairwise_transforms::ops::Add<T>();
#endif
        else if (op == 1)
#ifdef __CUDACC__
            return new(buffer) pairwise_transforms::ops::Copy<T>();
#else
            return new pairwise_transforms::ops::Copy<T>();
#endif
        else if (op == 2)
#ifdef __CUDACC__
            return new(buffer) pairwise_transforms::ops::Divide<T>();
#else
            return new pairwise_transforms::ops::Divide<T>();
#endif
        else if (op == 3)
#ifdef __CUDACC__
            return new(buffer) pairwise_transforms::ops::EqualTo<T>();
#else
            return new pairwise_transforms::ops::EqualTo<T>();
#endif
        else if (op == 4)
#ifdef __CUDACC__
            return new(buffer) pairwise_transforms::ops::GreaterThan<T>();
#else
            return new pairwise_transforms::ops::GreaterThan<T>();
#endif
        else if (op == 5)
#ifdef __CUDACC__
            return new(buffer) pairwise_transforms::ops::LessThan<T>();
#else
            return new pairwise_transforms::ops::LessThan<T>();
#endif
        else if (op == 6)
#ifdef __CUDACC__
            return new(buffer) pairwise_transforms::ops::Multiply<T>();
#else
            return new pairwise_transforms::ops::Multiply<T>();
#endif
        // NOTE(review): from op 7 onward the chain drops the "else"; behavior is
        // unchanged because every matched branch returns immediately.
        if (op == 7)
#ifdef __CUDACC__
            return new(buffer) pairwise_transforms::ops::ReverseDivide<T>();
#else
            return new pairwise_transforms::ops::ReverseDivide<T>();
#endif
        if (op == 8)
#ifdef __CUDACC__
            return new(buffer) pairwise_transforms::ops::ReverseSubtraction<T>();
#else
            return new pairwise_transforms::ops::ReverseSubtraction<T>();
#endif
        if (op == 9)
#ifdef __CUDACC__
            return new(buffer) pairwise_transforms::ops::Subtract<T>();
#else
            return new pairwise_transforms::ops::Subtract<T>();
#endif
        if (op == 10)
#ifdef __CUDACC__
            return new(buffer) pairwise_transforms::ops::Epsilon<T>();
#else
            return new pairwise_transforms::ops::Epsilon<T>();
#endif
        if(op == 11)
#ifdef __CUDACC__
            return new(buffer) pairwise_transforms::ops::GreaterThanOrEqual<T>();
#else
            return new pairwise_transforms::ops::GreaterThanOrEqual<T>();
#endif
        if(op == 12)
#ifdef __CUDACC__
            return new(buffer) pairwise_transforms::ops::LessThanOrEqual<T>();
#else
            return new pairwise_transforms::ops::LessThanOrEqual<T>();
#endif
        if(op == 13)
#ifdef __CUDACC__
            return new(buffer) pairwise_transforms::ops::Max<T>();
#else
            return new pairwise_transforms::ops::Max<T>();
#endif
        if(op == 14)
#ifdef __CUDACC__
            return new(buffer) pairwise_transforms::ops::Min<T>();
#else
            return new pairwise_transforms::ops::Min<T>();
#endif
        if(op == 15)
#ifdef __CUDACC__
            return new(buffer) pairwise_transforms::ops::NotEqualTo<T>();
#else
            return new pairwise_transforms::ops::NotEqualTo<T>();
#endif
        if(op == 16)
#ifdef __CUDACC__
            return new(buffer) pairwise_transforms::ops::Set<T>();
#else
            return new pairwise_transforms::ops::Set<T>();
#endif
        // Unknown op number.
        return nullptr;
    }
};
}
}

#ifdef __CUDACC__
/**
 * Device-side body shared by the shape-info-driven pair-wise kernels.
 *
 * Thread 0 of the block constructs a UnifiedSharedMemory manager, the op
 * factory and the op itself in dynamic shared memory (extern __shared__),
 * with a __syncthreads() barrier after each construction phase so every
 * thread sees the published pointers before use.
 *
 * @param opNum            op number dispatched through PairWiseTransformOpFactory
 * @param dx               first input buffer
 * @param dy               second input buffer
 * @param params           extra op parameters
 * @param result           output buffer
 * @param xShapeInfo/xRank shape descriptor and rank of dx
 * @param yShapeInfo/yRank shape descriptor and rank of dy
 * @param resultShapeInfo/zRank shape descriptor and rank of result
 * @param allocationPointer scratch allocation pointer forwarded to the op
 * @param tadOnlyShapeInfo  TAD shape descriptor forwarded to the op
 */
template <typename T>
__device__ void pairWiseTransformGeneric(
        int opNum,
        T *dx,
        T *dy,
        T *params,
        T *result,
        int *xShapeInfo, int xRank,
        int *yShapeInfo, int yRank,
        int *resultShapeInfo, int zRank,
        int *allocationPointer,
        int *tadOnlyShapeInfo) {

    __shared__ functions::pairwise_transforms::PairWiseTransform<T> *op;
    __shared__ functions::pairwise_transforms::PairWiseTransformOpFactory<T> *newOpFactory;

    __shared__ UnifiedSharedMemory *manager;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        // Placement-new the shared-memory manager at the base of the dynamic
        // shared-memory segment, then carve out factory/op/TAD regions.
        manager = new(shmem) UnifiedSharedMemory((int *) shmem);
        manager->init(sizeof(UnifiedSharedMemory),
                      sizeof(functions::pairwise_transforms::PairWiseTransformOpFactory<T>),
                      sizeof(functions::pairwise_transforms::PairWiseTransform<T>),
                      sizeof(shape::TAD), xRank);
    }
    __syncthreads();

    /*
    __shared__ int *ptrSharedXShapeInfo;
    __shared__ int *ptrSharedYShapeInfo;
    __shared__ int *ptrSharedZShapeInfo;

    if (xShapeInfo != nullptr) {
        shape::sweepShapeInfoBuffer(xShapeInfo, manager->getXShapeBuffer());
        if (threadIdx.x == 0)
            ptrSharedXShapeInfo = manager->getXShapeBuffer();
    } else if (threadIdx.x == 0) ptrSharedXShapeInfo = nullptr;

    if (yShapeInfo != nullptr) {
        shape::sweepShapeInfoBuffer(yShapeInfo, manager->getYShapeBuffer());
        if (threadIdx.x == 0)
            ptrSharedYShapeInfo = manager->getYShapeBuffer();
    } else if (threadIdx.x == 0) ptrSharedYShapeInfo = nullptr;

    if (resultShapeInfo != nullptr) {
        shape::sweepShapeInfoBuffer(resultShapeInfo, manager->getZShapeBuffer());
        if (threadIdx.x == 0)
            ptrSharedZShapeInfo = manager->getZShapeBuffer();
    } else if (threadIdx.x == 0) ptrSharedZShapeInfo = nullptr;
    */

    if(threadIdx.x == 0) {
        // Build the factory and the op in the pre-carved shared-memory regions.
        newOpFactory = new(manager->getFactorySpace()) functions::pairwise_transforms::PairWiseTransformOpFactory<T>();
        op = newOpFactory->getOp(opNum, manager->getFunctionSpace());
    }
    __syncthreads();

    op->transformCuda(
            dx, xShapeInfo,
            dy, yShapeInfo,
            result, resultShapeInfo,
            params,
            allocationPointer, manager, tadOnlyShapeInfo);
}

/**
 * Double-precision entry point for the shape-info-driven pair-wise transform.
 * Thin wrapper around pairWiseTransformGeneric<double>; see that function for
 * the parameter contract.
 */
extern "C" __global__ void pairWiseTransformDouble(
        int opNum,
        double *dx,
        double *dy,
        double *params,
        double *result,
        int *xShapeInfo, int xRank,
        int *yShapeInfo, int yRank,
        int *resultShapeInfo, int zRank,
        int *allocationPointer,
        int *tadOnlyShapeInfo) {
    pairWiseTransformGeneric<double>(
            opNum,
            dx,
            dy,
            params,
            result,
            xShapeInfo, xRank,
            yShapeInfo, yRank,
            resultShapeInfo, zRank,
            allocationPointer,
            tadOnlyShapeInfo);
}

/**
 * Single-precision entry point for the shape-info-driven pair-wise transform.
 * Thin wrapper around pairWiseTransformGeneric<float>; see that function for
 * the parameter contract.
 */
extern "C" __global__ void pairWiseTransformFloat(
        int opNum,
        float *dx,
        float *dy,
        float *params,
        float *result,
        int *xShapeInfo, int xRank,
        int *yShapeInfo, int yRank,
        int *resultShapeInfo, int zRank,
        int *allocationPointer,
        int *tadOnlyShapeInfo) {
    pairWiseTransformGeneric<float>(
            opNum,
            dx,
            dy,
            params,
            result,
            xShapeInfo, xRank,
            yShapeInfo, yRank,
            resultShapeInfo, zRank,
            allocationPointer,
            tadOnlyShapeInfo);
}

/**
 * Index-driven variant of the pair-wise transform body: element positions are
 * supplied explicitly through xIndexes / yIndexes / resultIndexes instead of
 * being derived from the shape descriptors. Shared-memory setup is identical
 * to the shape-info variant above.
 */
template <typename T>
__device__ void pairWiseTransformGeneric(
        int opNum,
        T *dx,
        T *dy,
        T *params,
        T *result,
        int *xShapeInfo, int xRank,
        int *yShapeInfo, int yRank,
        int *resultShapeInfo, int zRank,
        int *xIndexes,
        int *yIndexes,
        int *resultIndexes,
        int *allocationPointer,
        int *tadOnlyShapeInfo) {

    __shared__ functions::pairwise_transforms::PairWiseTransform<T> *op;
    __shared__ functions::pairwise_transforms::PairWiseTransformOpFactory<T> *newOpFactory;

    __shared__ UnifiedSharedMemory *manager;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        manager = new(shmem) UnifiedSharedMemory((int *) shmem);
        manager->init(sizeof(UnifiedSharedMemory),
                      sizeof(functions::pairwise_transforms::PairWiseTransformOpFactory<T>),
                      sizeof(functions::pairwise_transforms::PairWiseTransform<T>),
                      sizeof(shape::TAD), xRank);
    }
    __syncthreads();

    /*
    __shared__ int *ptrSharedXShapeInfo;
    __shared__ int *ptrSharedYShapeInfo;
    __shared__ int *ptrSharedZShapeInfo;

    if (xShapeInfo != nullptr) {
        shape::sweepShapeInfoBuffer(xShapeInfo, manager->getXShapeBuffer());
        if (threadIdx.x == 0)
            ptrSharedXShapeInfo = manager->getXShapeBuffer();
    } else if (threadIdx.x == 0) ptrSharedXShapeInfo = nullptr;

    if (yShapeInfo != nullptr) {
        shape::sweepShapeInfoBuffer(yShapeInfo, manager->getYShapeBuffer());
        if (threadIdx.x == 0)
            ptrSharedYShapeInfo = manager->getYShapeBuffer();
    } else if (threadIdx.x == 0) ptrSharedYShapeInfo = nullptr;

    if (resultShapeInfo != nullptr) {
        shape::sweepShapeInfoBuffer(resultShapeInfo, manager->getZShapeBuffer());
        if (threadIdx.x == 0)
            ptrSharedZShapeInfo = manager->getZShapeBuffer();
    } else if (threadIdx.x == 0) ptrSharedZShapeInfo = nullptr;
    */

    if(threadIdx.x == 0) {
        newOpFactory = new(manager->getFactorySpace()) functions::pairwise_transforms::PairWiseTransformOpFactory<T>();
        op = newOpFactory->getOp(opNum, manager->getFunctionSpace());
    }
    __syncthreads();

    op->transform(
            dx, xShapeInfo,
            dy, yShapeInfo,
            result, resultShapeInfo,
            params,
            xIndexes,
            yIndexes,
            resultIndexes,
            allocationPointer, manager, tadOnlyShapeInfo);
}

/**
 * Double-precision entry point for the index-driven pair-wise transform.
 * Thin wrapper around the index-taking pairWiseTransformGeneric<double>.
 */
__global__ void pairWiseTransformDoubleIndex(
        int opNum,
        double *dx,
        double *dy,
        double *params,
        double *result,
        int *xShapeInfo, int xRank,
        int *yShapeInfo, int yRank,
        int *resultShapeInfo, int zRank,
        int *xIndexes,
        int *yIndexes,
        int *resultIndexes,
        int *allocationPointer,
        int *tadOnlyShapeInfo) {
    pairWiseTransformGeneric<double>(
            opNum,
            dx,
            dy,
            params,
            result,
            xShapeInfo, xRank,
            yShapeInfo, yRank,
            resultShapeInfo, zRank,
            xIndexes,
            yIndexes,
            resultIndexes,
            allocationPointer,
            tadOnlyShapeInfo);
}

/**
 * Single-precision entry point for the index-driven pair-wise transform.
 * Thin wrapper around the index-taking pairWiseTransformGeneric<float>.
 */
__global__ void pairWiseTransformFloatIndex(
        int opNum,
        float *dx,
        float *dy,
        float *params,
        float *result,
        int *xShapeInfo, int xRank,
        int *yShapeInfo, int yRank,
        int *resultShapeInfo, int zRank,
        int *xIndexes,
        int *yIndexes,
        int *resultIndexes,
        int *allocationPointer,
        int *tadOnlyShapeInfo) {
    pairWiseTransformGeneric<float>(
            opNum,
            dx,
            dy,
            params,
            result,
            xShapeInfo, xRank,
            yShapeInfo, yRank,
            resultShapeInfo, zRank,
            xIndexes,
            yIndexes,
            resultIndexes,
            allocationPointer,
            tadOnlyShapeInfo);
}

/**
 * Strided (flat) variant of the pair-wise transform body: the problem is
 * described by a length n and per-buffer strides (incx / incy / incz) instead
 * of shape descriptors.
 *
 * NOTE(review): unlike the two variants above, here the factory and op are
 * built inside the SAME threadIdx.x == 0 section as the manager, with a single
 * __syncthreads() before use — one barrier instead of two.
 */
template<typename T>
__device__ void pairWiseTransformStridedGeneric(
        int opNum,
        Nd4jIndex n,
        T *dx,
        T *dy,
        int incx,
        int incy,
        T *params,
        T *result,
        int incz,
        int *allocationPointer,
        int *tadOnlyShapeInfo) {

    __shared__ functions::pairwise_transforms::PairWiseTransform<T> *op;
    __shared__ functions::pairwise_transforms::PairWiseTransformOpFactory<T> *newOpFactory;

    __shared__ UnifiedSharedMemory *manager;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        manager = new(shmem) UnifiedSharedMemory((int *) shmem);
        // rank 0: no shape buffers are needed for the strided variant.
        manager->init(sizeof(UnifiedSharedMemory),
                      sizeof(functions::pairwise_transforms::PairWiseTransformOpFactory<T>),
                      sizeof(functions::pairwise_transforms::PairWiseTransform<T>),
                      sizeof(shape::TAD), 0);

        newOpFactory = new(manager->getFactorySpace()) functions::pairwise_transforms::PairWiseTransformOpFactory<T>();
        op = newOpFactory->getOp(opNum, manager->getFunctionSpace());
    }
    __syncthreads();

    op->transformCuda(n, dx, dy, incx, incy, params, result, incz, allocationPointer, manager, tadOnlyShapeInfo);
}

/**
 * Double-precision entry point for the strided pair-wise transform.
 * Thin wrapper around pairWiseTransformStridedGeneric<double>.
 */
__global__ void pairWiseTransformStridedDouble(
        int opNum,
        Nd4jIndex n,
        double *dx,
        double *dy,
        int incx,
        int incy,
        double *params,
        double *result,
        int incz,
        int *allocationPointer,
        int *tadOnlyShapeInfo) {
    pairWiseTransformStridedGeneric<double>(
            opNum,
            n,
            dx,
            dy,
            incx,
            incy,
            params,
            result,
            incz,
            allocationPointer,
            tadOnlyShapeInfo);
}

/**
 * Single-precision entry point for the strided pair-wise transform.
 * Thin wrapper around pairWiseTransformStridedGeneric<float>.
 */
__global__ void pairWiseTransformStridedFloat(
        int opNum,
        Nd4jIndex n,
        float *dx,
        float *dy,
        int incx,
        int incy,
        float *params,
        float *result,
        int incz,
        int *allocationPointer,
        int *tadOnlyShapeInfo) {
    pairWiseTransformStridedGeneric<float>(
            opNum,
            n,
            dx,
            dy,
            incx,
            incy,
            params,
            result,
            incz,
            allocationPointer,
            tadOnlyShapeInfo);
}

#endif

#endif /* PAIRWISE_TRANSFORM_H_ */
file_io.c
#include "file_io.h"

/* ---------------------------------------------------------------------------------------------------------------
*  compress_annotations_disk(bft, filename_bft)
*  ---------------------------------------------------------------------------------------------------------------
*  Recompress all color annotations of a BFT via a disk round-trip: the tree is serialized to filename_bft,
*  its in-memory annotations are extracted and sorted, a compressed annotation set is written to a companion
*  "<filename_bft>_annots" file, and the tree is reloaded with the compressed annotations substituted in.
*  Both on-disk files are removed before returning.
*  ---------------------------------------------------------------------------------------------------------------
*  bft: ptr to the root of the BFT whose annotations are compressed
*  filename_bft: path used for the temporary serialized BFT
*  --------------------------------------------------------------------------------------------------------------- */
void compress_annotations_disk(BFT_Root* bft, char* filename_bft){

    ASSERT_NULL_PTR(bft, "compress_annotations_disk()\n")
    ASSERT_NULL_PTR(filename_bft, "compress_annotations_disk()\n")

    Pvoid_t comp_annots = (PWord_t)NULL;  // JudySL array: annotation bytes -> compressed id
    Word_t Rc_word;

    memory_Used* mem;

    // Remember the caller's compression flag so it can be restored at the end.
    bool is_compressed = (bft->compressed == 1 ? true : false);

    int len_longest_annot;
    int lvl_bft = (bft->k / NB_CHAR_SUF_PREF) - 1;

    // "<filename_bft>_annots" (+50 leaves headroom for the suffix and the NUL).
    char* filename_bft_tmp = malloc((strlen(filename_bft) + 50) * sizeof(char));
    ASSERT_NULL_PTR(filename_bft_tmp, "compress_annotations_disk()\n")

    strcpy(filename_bft_tmp, filename_bft);
    strcpy(&filename_bft_tmp[strlen(filename_bft)], "_annots");

    // Upper bound on the size of any single annotation, across both the live
    // nodes and the already-compressed color sets.
    mem = printMemoryUsedFromNode(&(bft->node), lvl_bft, bft->k, bft->info_per_lvl);
    len_longest_annot = (int) MAX(mem->size_biggest_annot+1, getMaxSize_annotation_array_elem(bft->comp_set_colors));
    free(mem);

    // Serialize the tree in UNcompressed form (flag temporarily cleared),
    // then restore the flag.
    if (is_compressed){
        bft->compressed = 0;
        write_BFT_Root(bft, filename_bft, false);
        bft->compressed = 1;
    }
    else write_BFT_Root(bft, filename_bft, false);

    load_annotation_from_Node(&(bft->node), lvl_bft, bft->k, len_longest_annot, bft->info_per_lvl,
                              &comp_annots, bft->comp_set_colors, bft->ann_inf); // Load annotations in a compressed way

    // The in-memory tree and color sets are discarded; they will be rebuilt
    // from disk by read_BFT_replace_comp_annots_bis() below.
    freeNode(&bft->node, lvl_bft, bft->info_per_lvl);
    free_annotation_array_elem(&(bft->comp_set_colors), &(bft->length_comp_set_colors));

    sort_annotations3(&comp_annots, len_longest_annot);

    write_partial_comp_set_colors(filename_bft_tmp, &comp_annots, len_longest_annot); // Write the new compressed annotations

    read_BFT_replace_comp_annots_bis(bft, filename_bft, filename_bft_tmp, &comp_annots,
                                     len_longest_annot, is_compressed);

    // NOTE(review): this removes the serialized BFT itself, so "temporary file"
    // in the warning refers to the scratch serialization — confirm the message
    // is intended.
    if (remove(filename_bft)) printf("Warning: Could not remove temporary file.\n");

    if (is_compressed) bft->compressed = 1;

    #if defined (_WORDx86)
    // On 32-bit builds JudySL values are heap pointers that must be freed
    // individually before the array itself is released.
    Word_t * PValue;

    // NOTE(review): divisor is SIZE_BITS_UINT_8T - 1 here, while the analogous
    // allocation elsewhere uses SIZE_BITS_UINT_8T — verify which is intended.
    uint8_t* it_index = calloc((len_longest_annot + CEIL(len_longest_annot, SIZE_BITS_UINT_8T - 1) + 4), sizeof(uint8_t));
    ASSERT_NULL_PTR(it_index, "compressKmers_from_KmerFiles()\n");

    JSLF(PValue, comp_annots, it_index);

    while (PValue != NULL){
        free(*PValue);
        JSLN(PValue, comp_annots, it_index);
    }

    free(it_index);
    #endif

    JSLFA(Rc_word, comp_annots);

    // Replace the color sets with the compressed version written to disk.
    free_annotation_array_elem(&(bft->comp_set_colors), &(bft->length_comp_set_colors));
    read_annotation_array_elem(filename_bft_tmp, &(bft->comp_set_colors), &(bft->length_comp_set_colors));

    if (remove(filename_bft_tmp)) printf("Warning: Could not remove temporary file.\n");

    free(filename_bft_tmp);

    return;
}

/* ---------------------------------------------------------------------------------------------------------------
*  insert_Genomes_from_KmerFiles(root, nb_files, filenames, binary_files, filename_bft)
*  ---------------------------------------------------------------------------------------------------------------
*  Insert k-mers from k-mer files into a BFT. Each file is registered as one genome (colored by its basename).
*  ---------------------------------------------------------------------------------------------------------------
*  root: ptr to the root of a BFT
*  filenames: array of filenames. The files contain the k-mers to insert.
*  binary_files: indicates if the files contain k-mers in ASCII (0) or compressed k-mers, 2 bits per nuc. (1)
*  filename_bft: scratch path handed to compress_annotations_disk() after the last file
*  --------------------------------------------------------------------------------------------------------------- */
void insert_Genomes_from_KmerFiles(BFT_Root* root, int nb_files, char** filenames, int binary_files, char* filename_bft){

    ASSERT_NULL_PTR(root,"insert_Genomes_from_KmerFiles()")
    ASSERT_NULL_PTR(filenames,"insert_Genomes_from_KmerFiles()")

    struct timeval tval_before, tval_after, tval_last, tval_result;
    gettimeofday(&tval_before, NULL);
    tval_last = tval_before;

    FILE* file;

    uint8_t* array_kmers = calloc(SIZE_BUFFER, sizeof(uint8_t)); // batched packed k-mers
    ASSERT_NULL_PTR(array_kmers,"insert_Genomes_from_KmerFiles()")

    char* line = calloc(100, sizeof(char));
    ASSERT_NULL_PTR(line,"insert_Genomes_from_KmerFiles()")

    char* str_tmp;

    int i = 0, j = 0, k = 0;       // j: k-mers buffered, k: write offset into array_kmers
    int size_id_genome = 0;
    int nb_genomes_before = root->nb_genomes;
    int nb_bytes_kmer = CEIL(root->k*2, SIZE_BITS_UINT_8T); // bytes per packed k-mer (2 bits/nuc)
    int nb_kmer_in_buf = SIZE_BUFFER/nb_bytes_kmer;

    size_t return_fread;

    uint64_t kmers_read;

    for (i = 0; i < nb_files; i++){ // For each file in input

        k = 0;
        j = 0;
        kmers_read = 0;

        // Register the file (by basename) as a new genome/color.
        str_tmp = basename(filenames[i]);
        add_genomes_BFT_Root(1, &str_tmp, root);

        size_id_genome = get_nb_bytes_power2_annot(root->nb_genomes-1);

        file = fopen(filenames[i], "r");
        ASSERT_NULL_PTR(file,"insert_Genomes_from_KmerFiles()")

        printf("\nFile %d: %s\n\n", root->nb_genomes-1, filenames[i]);

        if (binary_files){

            // Binary format: two ASCII header lines (k, then the k-mer count)
            // followed by packed 2-bit k-mers.
            if (fgets(line, 100, file) != NULL) k = atoi(line);
            else ERROR("Cannot read header of the file")

            if (fgets(line, 100, file) != NULL) printf("%d %d-mers in the file\n\n", atoi(line), k);
            else ERROR("Cannot read header of the file")

            while ((!ferror(file)) && (!feof(file))){

                return_fread = fread(array_kmers, (size_t)nb_bytes_kmer, (size_t)nb_kmer_in_buf, file);

                insertKmers(root, array_kmers, return_fread, root->nb_genomes-1, size_id_genome);

                memset(array_kmers, 0, SIZE_BUFFER*sizeof(uint8_t));

                // Progress line each time a PRINT_EVERY_X_KMERS boundary is crossed.
                if ((kmers_read%PRINT_EVERY_X_KMERS) > ((kmers_read+return_fread)%PRINT_EVERY_X_KMERS)){
                    printf("%" PRIu64 " kmers read\n", kmers_read+return_fread);
                }

                kmers_read += return_fread;
            }
        }
        else {
            // ASCII format: one k-mer per line; pack into array_kmers and
            // flush to the tree whenever the buffer fills.
            while (fgets(line, 100, file) != NULL){

                if (parseKmerCount(line, root->k, array_kmers, k) == 1){

                    k += nb_bytes_kmer;
                    j++;

                    if (j == nb_kmer_in_buf){

                        insertKmers(root, array_kmers, nb_kmer_in_buf, root->nb_genomes-1, size_id_genome);

                        j = 0;
                        k = 0;
                        memset(array_kmers, 0, SIZE_BUFFER*sizeof(uint8_t));

                        if ((kmers_read%PRINT_EVERY_X_KMERS) > ((kmers_read+nb_kmer_in_buf)%PRINT_EVERY_X_KMERS)){
                            printf("%" PRIu64 " kmers read\n", kmers_read+nb_kmer_in_buf);
                        }

                        kmers_read += nb_kmer_in_buf;
                    }
                }
            }

            // Flush the final, partially filled buffer.
            insertKmers(root, array_kmers, j, root->nb_genomes-1, size_id_genome);
            kmers_read += j;

            memset(array_kmers, 0, SIZE_BUFFER*sizeof(uint8_t));
        }

        fclose(file);

        //if (root->treshold_compression && (root->nb_genomes - 1 > 5) && ((root->nb_genomes - 1) % root->treshold_compression == 0))
            //compress_annotations_disk(root, filename_bft);

        // Compress annotations once, after the last input file has been inserted
        // (the condition only holds on the final loop iteration).
        if (root->treshold_compression && (root->nb_genomes - 1 > 5) && (root->nb_genomes == nb_genomes_before + nb_files))
            compress_annotations_disk(root, filename_bft);

        gettimeofday(&tval_after, NULL);

        time_spent(&tval_last, &tval_after, &tval_result);
        printf("\nElapsed time: %ld.%06ld s\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec);

        time_spent(&tval_before, &tval_after, &tval_result);
        printf("Total elapsed time: %ld.%06ld s\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec);

        printf("Peak of memory: %llu mb\n", ((unsigned long long int)getPeakRSS())/1024);
        printf("Current memory: %llu mb\n", ((unsigned long long int)getCurrentRSS())/1024);

        tval_last = tval_after;
    }

    free(line);
    free(array_kmers);

    return;
}

/* ---------------------------------------------------------------------------------------------------------------
*  insert_Genomes_from_FASTxFiles(root, filenames, size_kmer, ptr_ptr_annot_sorted)
*  ---------------------------------------------------------------------------------------------------------------
*  Insert k-mers from FASTx
files into a BFT
*  ---------------------------------------------------------------------------------------------------------------
*  root: ptr to the root of a BFT
*  filenames: array of FASTx filenames
*  size_kmer: length k of k-mers to extract from the FASTx files
*  --------------------------------------------------------------------------------------------------------------- */
// NOTE(review): currently a stub — see the note inside the body.
void insert_Genomes_from_FASTxFiles(BFT_Root* root, int nb_files, char** filenames){

    ASSERT_NULL_PTR(root,"insert_Genomes_from_FASTxFiles()")
    ASSERT_NULL_PTR(filenames,"insert_Genomes_from_FASTxFiles()")

    /* NOTE(review): the original FASTx implementation was commented out here.
       It opened each file with kseq, buffered sequence fragments across reads
       (stitching k-mers that overlap two fragments through a temporary
       size_kmer-1 character buffer), extracted k-mers with
       parseSequenceBuffer(), inserted them with insertKmers(), and ran a
       periodic in-memory annotation compression pass every
       treshold_compression genomes. That code targets an older internal API
       (e.g. an insertKmers() overload taking size_kmer, and free variables
       such as info_per_lvl / lvl_root) and no longer compiles against the
       current one. As it stands, this function only validates its arguments
       and returns without inserting anything — confirm whether it should be
       ported or removed. */

    return;
}

/*int queryBFT_kmerPresences_from_KmerFiles(BFT_Root* root, char* query_filename, int binary_file, char* output_filename){

    ASSERT_NULL_PTR(root,"queryBFT_kmerPresences_from_KmerFiles()")
    ASSERT_NULL_PTR(query_filename,"queryBFT_kmerPresences_from_KmerFiles()")

    struct timeval tval_before, tval_after, tval_result;
    gettimeofday(&tval_before, NULL);

    const char comma = ',';

    //int annot_present;
    int size_annot;
    int size_annot_cplx;
    int size_annot_res;

    int i = 0;
    int j = 0;
    int k = 0;
    int nb_kmers_present = 0;
    int nb_bytes_kmer = CEIL(root->k*2, SIZE_BITS_UINT_8T);
    int nb_kmer_in_buf = SIZE_BUFFER/nb_bytes_kmer;
    int lvl_root = (root->k / NB_CHAR_SUF_PREF) - 1;

    uint64_t kmers_read = 0;

    FILE* file_query;
    FILE* file_output;

    resultPresence* res;

    size_t return_fread;

    uint8_t* annot;
    uint8_t* annot_ext;
    uint8_t* annot_cplx;

    uint8_t* annot_res = calloc(CEIL(root->nb_genomes+2, SIZE_BITS_UINT_8T), sizeof(uint8_t));
    ASSERT_NULL_PTR(annot_res,"queryBFT_kmerPresences_from_KmerFiles()")

    uint8_t* array_kmers = calloc(SIZE_BUFFER, sizeof(uint8_t));
    ASSERT_NULL_PTR(array_kmers,"queryBFT_kmerPresences_from_KmerFiles()")

    char* line = calloc(100, sizeof(char));
    ASSERT_NULL_PTR(line,"queryBFT_kmerPresences_from_KmerFiles()")

    file_query = fopen(query_filename, "r");
    ASSERT_NULL_PTR(file_query,"queryBFT_kmerPresences_from_KmerFiles()")

    file_output = fopen(output_filename, "w");
    ASSERT_NULL_PTR(file_output,"queryBFT_kmerPresences_from_KmerFiles()")

    printf("\nQuerying BFT for k-mers in %s\n\n", query_filename);

    for (i=0; i<root->nb_genomes-1; i++){
        fwrite(root->filenames[i], sizeof(char), strlen(root->filenames[i])-1,
file_output); fwrite(&comma, sizeof(char), 1, file_output); } fwrite(root->filenames[i], sizeof(char), strlen(root->filenames[i]), file_output); if (binary_file){ if (fgets(line, 100, file_query) == NULL) ERROR("Cannot read header of the queries file") if (fgets(line, 100, file_query) == NULL) ERROR("Cannot read header of the queries file") while ((!ferror(file_query)) && (!feof(file_query))){ return_fread = fread(array_kmers, (size_t)nb_bytes_kmer, (size_t)nb_kmer_in_buf, file_query); for (k=0; k<(int)return_fread; k++){ res = isKmerPresent(&(root->node), root, lvl_root, &(array_kmers[k*nb_bytes_kmer]), root->k); if (res->link_child != NULL){ if (res->posFilter2 != 0){ get_annot((UC*)res->container, &annot, &annot_ext, &annot_cplx, &size_annot, &size_annot_cplx, res->posFilter2, res->posFilter3, res->pos_sub_bucket); } else{ get_annot(&(((UC*)((CC*)res->container)->children)[res->bucket]), &annot, &annot_ext, &annot_cplx, &size_annot, &size_annot_cplx, res->posFilter2, res->posFilter3, res->pos_sub_bucket); } if (size_annot != 0){ memcpy(annot_res, annot, size_annot * sizeof(uint8_t)); size_annot_res = size_annot; } if ((annot_ext != NULL) && (annot_ext[0] != 0)){ memcpy(&(annot_res[size_annot]), annot_ext, sizeof(uint8_t)); size_annot_res++; } if (size_annot_cplx != 0){ memcpy(annot_res, annot_cplx, size_annot_cplx * sizeof(uint8_t)); size_annot_res = size_annot_cplx; } printAnnotation_CSV(file_output, annot_res, size_annot_res, NULL, 0, root->nb_genomes-1, root->comp_set_colors); nb_kmers_present++; } else { annot_res[0] = 0; printAnnotation_CSV(file_output, annot_res, 1, NULL, 0, root->nb_genomes-1, root->comp_set_colors); } free(res); } //if ((kmers_read%PRINT_EVERY_X_KMERS) > ((kmers_read+return_fread)%PRINT_EVERY_X_KMERS)) // printf("%" PRIu64 " kmers read\n", kmers_read+return_fread); kmers_read += return_fread; memset(array_kmers, 0, SIZE_BUFFER*sizeof(uint8_t)); } } else{ while (fgets(line, 100, file_query) != NULL){ if (parseKmerCount(line, root->k, 
array_kmers, k) == 1){ k += nb_bytes_kmer; j++; if (j == nb_kmer_in_buf){ for (i=0; i<nb_kmer_in_buf; i++){ res = isKmerPresent(&(root->node), root, lvl_root, &(array_kmers[i*nb_bytes_kmer]), root->k); if (res->link_child != NULL){ if (res->posFilter2 != 0){ get_annot((UC*)res->container, &annot, &annot_ext, &annot_cplx, &size_annot, &size_annot_cplx, res->posFilter2, res->posFilter3, res->pos_sub_bucket); } else{ get_annot(&(((UC*)((CC*)res->container)->children)[res->bucket]), &annot, &annot_ext, &annot_cplx, &size_annot, &size_annot_cplx, res->posFilter2, res->posFilter3, res->pos_sub_bucket); } if (size_annot != 0){ memcpy(annot_res, annot, size_annot * sizeof(uint8_t)); size_annot_res = size_annot; } if ((annot_ext != NULL) && (annot_ext[0] != 0)){ memcpy(&(annot_res[size_annot]), annot_ext, sizeof(uint8_t)); size_annot_res++; } if (size_annot_cplx != 0){ memcpy(annot_res, annot_cplx, size_annot_cplx * sizeof(uint8_t)); size_annot_res = size_annot_cplx; } printAnnotation_CSV(file_output, annot_res, size_annot_res, NULL, 0, root->nb_genomes-1, root->comp_set_colors); nb_kmers_present++; } else { annot_res[0] = 0; printAnnotation_CSV(file_output, annot_res, 1, NULL, 0, root->nb_genomes-1, root->comp_set_colors); } free(res); } j = 0; k = 0; memset(array_kmers, 0, SIZE_BUFFER*sizeof(uint8_t)); //if ((kmers_read%PRINT_EVERY_X_KMERS) > ((kmers_read+nb_kmer_in_buf)%PRINT_EVERY_X_KMERS)) // printf("%" PRIu64 " kmers read\n", kmers_read+nb_kmer_in_buf); kmers_read += nb_kmer_in_buf; } } } for (i=0; i<j; i++){ res = isKmerPresent(&(root->node), root, lvl_root, &(array_kmers[i*nb_bytes_kmer]), root->k); if (res->link_child != NULL){ if (res->posFilter2 != 0){ get_annot((UC*)res->container, &annot, &annot_ext, &annot_cplx, &size_annot, &size_annot_cplx, res->posFilter2, res->posFilter3, res->pos_sub_bucket); } else{ get_annot(&(((UC*)((CC*)res->container)->children)[res->bucket]), &annot, &annot_ext, &annot_cplx, &size_annot, &size_annot_cplx, res->posFilter2, 
res->posFilter3, res->pos_sub_bucket); } if (size_annot != 0){ memcpy(annot_res, annot, size_annot * sizeof(uint8_t)); size_annot_res = size_annot; } if ((annot_ext != NULL) && (annot_ext[0] != 0)){ memcpy(&(annot_res[size_annot]), annot_ext, sizeof(uint8_t)); size_annot_res++; } if (size_annot_cplx != 0){ memcpy(annot_res, annot_cplx, size_annot_cplx * sizeof(uint8_t)); size_annot_res = size_annot_cplx; } printAnnotation_CSV(file_output, annot_res, size_annot_res, NULL, 0, root->nb_genomes-1, root->comp_set_colors); nb_kmers_present++; } else { annot_res[0] = 0; printAnnotation_CSV(file_output, annot_res, 1, NULL, 0, root->nb_genomes-1, root->comp_set_colors); } free(res); } memset(array_kmers, 0, SIZE_BUFFER*sizeof(uint8_t)); } fclose(file_query); fclose(file_output); free(array_kmers); free(annot_res); free(line); gettimeofday(&tval_after, NULL); time_spent(&tval_before, &tval_after, &tval_result); printf("\nElapsed time: %ld.%06ld s\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec); printf("Peak of memory: %llu mb\n", ((unsigned long long int)getPeakRSS())/1024); printf("Current memory: %llu mb\n", ((unsigned long long int)getCurrentRSS())/1024); return nb_kmers_present; }*/ int queryBFT_kmerPresences_from_KmerFiles(BFT_Root* root, char* query_filename, int binary_file, char* output_filename){ ASSERT_NULL_PTR(root,"queryBFT_kmerPresences_from_KmerFiles()") ASSERT_NULL_PTR(query_filename,"queryBFT_kmerPresences_from_KmerFiles()") struct timeval tval_before, tval_after, tval_result; gettimeofday(&tval_before, NULL); const char nl = '\n'; const char eol = '\0'; const char csv_sep = ','; const char not_present = '0'; const char present = '1'; int j = 0, k = 0; int res_parseKmerCount; int nb_kmers_present = 0; int nb_bytes_kmer = CEIL(root->k * 2, SIZE_BITS_UINT_8T); int nb_kmer_in_buf = SIZE_BUFFER/nb_bytes_kmer; int lvl_root = (root->k / NB_CHAR_SUF_PREF) - 1; ssize_t res_get_line; size_t return_fread; size_t size_buffer_queries = SIZE_BUFFER; 
uint32_t i, it_annot, it_csv_line_res; uint32_t* ids_present; uint64_t kmers_read = 0; BFT_kmer* bft_kmer; BFT_annotation* bft_annot; bool* is_iupac; char* buffer_queries = calloc(size_buffer_queries, sizeof(char)); ASSERT_NULL_PTR(buffer_queries,"query_sequences_outputCSV()\n"); char* csv_line_res = calloc(root->nb_genomes * 2, sizeof(char)); ASSERT_NULL_PTR(csv_line_res,"query_sequences_outputCSV()\n"); uint8_t* array_kmers = calloc(SIZE_BUFFER, sizeof(uint8_t)); ASSERT_NULL_PTR(array_kmers,"queryBFT_kmerPresences_from_KmerFiles()") FILE* file_query = fopen(query_filename, "r"); ASSERT_NULL_PTR(file_query,"queryBFT_kmerPresences_from_KmerFiles()") FILE* file_output = fopen(output_filename, "w"); ASSERT_NULL_PTR(file_output,"queryBFT_kmerPresences_from_KmerFiles()") bft_kmer = create_empty_kmer(); printf("\nQuerying BFT for k-mers in %s\n\n", query_filename); for (i = 0; i < root->nb_genomes - 1; i++){ fwrite(root->filenames[i], sizeof(char), strlen(root->filenames[i]), file_output); fwrite(&csv_sep, sizeof(char), 1, file_output); csv_line_res[i * 2 + 1] = csv_sep; } csv_line_res[root->nb_genomes * 2 - 1] = nl; fwrite(root->filenames[i], sizeof(char), strlen(root->filenames[i]), file_output); if (fwrite(&nl, sizeof(char), 1, file_output) != 1) ERROR("query_sequences_outputCSV(): could not write output to CSV file.\n"); if (binary_file){ if (fgets(buffer_queries, 100, file_query) == NULL) ERROR("Cannot read header of the queries file") if (fgets(buffer_queries, 100, file_query) == NULL) ERROR("Cannot read header of the queries file") while ((!ferror(file_query)) && (!feof(file_query))){ return_fread = fread(array_kmers, (size_t)nb_bytes_kmer, (size_t)nb_kmer_in_buf, file_query); for (k = 0; k < (int)return_fread; k++){ bft_kmer->res = isKmerPresent(&(root->node), root, lvl_root, &(array_kmers[k * nb_bytes_kmer]), root->k); it_csv_line_res = 0; if (is_kmer_in_cdbg(bft_kmer)){ nb_kmers_present++; bft_annot = get_annotation(bft_kmer); ids_present = 
get_list_id_genomes(bft_annot, root); free_BFT_annotation(bft_annot); for (it_annot = 1; it_annot <= ids_present[0]; it_annot++){ for (i = 0; i < ids_present[it_annot] - (it_annot == 1 ? 0 : ids_present[it_annot - 1] + 1); i++, it_csv_line_res += 2) csv_line_res[it_csv_line_res] = not_present; csv_line_res[it_csv_line_res] = present; it_csv_line_res += 2; } for (it_annot = ids_present[ids_present[0]] + 1; it_annot < root->nb_genomes; it_annot++, it_csv_line_res += 2) csv_line_res[it_csv_line_res] = not_present; free(ids_present); } else{ for (it_annot = 0; it_annot < root->nb_genomes; it_annot++, it_csv_line_res += 2) csv_line_res[it_csv_line_res] = not_present; } if (fwrite(csv_line_res, sizeof(char), root->nb_genomes * 2, file_output) != root->nb_genomes * 2) ERROR("query_sequences_outputCSV(): could not write output to CSV file.\n"); free(bft_kmer->res); } kmers_read += return_fread; memset(array_kmers, 0, SIZE_BUFFER*sizeof(uint8_t)); } } else{ is_iupac = malloc(nb_kmer_in_buf * sizeof(bool)); ASSERT_NULL_PTR(is_iupac, "queryBFT_kmerPresences_from_KmerFiles()") res_get_line = getline(&buffer_queries, &size_buffer_queries, file_query); while ((res_get_line != -1) || j){ if (res_get_line != -1){ buffer_queries[strcspn(buffer_queries, "\r\n")] = '\0'; kmers_read++; res_parseKmerCount = parseKmerCount(buffer_queries, root->k, array_kmers, k); } else res_parseKmerCount = 1; if (res_parseKmerCount == 1){ if (res_get_line != -1){ is_iupac[j] = false; k += nb_bytes_kmer; j++; } if ((res_get_line == -1) || (j == nb_kmer_in_buf)){ for (k = 0; k < j; k++){ if (!is_iupac[k]){ bft_kmer->res = isKmerPresent(&(root->node), root, lvl_root, &array_kmers[k * nb_bytes_kmer], root->k); it_csv_line_res = 0; if (is_kmer_in_cdbg(bft_kmer)){ nb_kmers_present++; bft_annot = get_annotation(bft_kmer); ids_present = get_list_id_genomes(bft_annot, root); free_BFT_annotation(bft_annot); for (it_annot = 1; it_annot <= ids_present[0]; it_annot++){ for (i = 0; i < ids_present[it_annot] - 
(it_annot == 1 ? 0 : ids_present[it_annot - 1] + 1); i++, it_csv_line_res += 2) csv_line_res[it_csv_line_res] = not_present; csv_line_res[it_csv_line_res] = present; it_csv_line_res += 2; } for (it_annot = ids_present[ids_present[0]] + 1; it_annot < root->nb_genomes; it_annot++, it_csv_line_res += 2) csv_line_res[it_csv_line_res] = not_present; free(ids_present); } else{ for (it_annot = 0; it_annot < root->nb_genomes; it_annot++, it_csv_line_res += 2) csv_line_res[it_csv_line_res] = not_present; } free(bft_kmer->res); } else { it_csv_line_res = 0; for (it_annot = 0; it_annot < root->nb_genomes; it_annot++, it_csv_line_res += 2) csv_line_res[it_csv_line_res] = not_present; } if (fwrite(csv_line_res, sizeof(char), root->nb_genomes * 2, file_output) != root->nb_genomes * 2) ERROR("query_sequences_outputCSV(): could not write output to CSV file.\n"); } j = 0; k = 0; memset(array_kmers, 0, SIZE_BUFFER * sizeof(uint8_t)); } } else is_iupac[j] = true; //if ((kmers_read%PRINT_EVERY_X_KMERS) > ((kmers_read+nb_kmer_in_buf)%PRINT_EVERY_X_KMERS)) // printf("%" PRIu64 " kmers read\n", kmers_read+nb_kmer_in_buf); res_get_line = getline(&buffer_queries, &size_buffer_queries, file_query); } free(is_iupac); } fseek(file_output, 0 - ((long int) sizeof(char)), SEEK_CUR); if (fwrite(&eol, sizeof(char), 1, file_output) != 1) ERROR("query_sequences_outputCSV(): could not write output to CSV file.\n"); fclose(file_query); fclose(file_output); free(bft_kmer); free(array_kmers); free(buffer_queries); free(csv_line_res); gettimeofday(&tval_after, NULL); time_spent(&tval_before, &tval_after, &tval_result); printf("\nElapsed time: %ld.%06ld s\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec); printf("Peak of memory: %llu mb\n", ((unsigned long long int)getPeakRSS())/1024); printf("Current memory: %llu mb\n", ((unsigned long long int)getCurrentRSS())/1024); return nb_kmers_present; } int queryBFT_kmerBranching_from_KmerFiles(BFT_Root* root, char* query_filename, int 
binary_file){

    ASSERT_NULL_PTR(root,"queryBFT_kmerBranching_from_KmerFiles()")
    ASSERT_NULL_PTR(query_filename,"queryBFT_kmerBranching_from_KmerFiles()")

    struct timeval tval_before, tval_after, tval_result;
    gettimeofday(&tval_before, NULL);

    int it_kmer = 0;            // index of the k-mer inspected in the buffer
    int nb_kmers_buffered = 0;  // k-mers currently encoded in array_kmers
    int pos_buffer = 0;         // byte offset of the next free slot in array_kmers
    int count_branching_node = 0;

    int lvl_root = (root->k / NB_CHAR_SUF_PREF) - 1;
    int nb_bytes_kmer = CEIL(root->k*2, SIZE_BITS_UINT_8T);
    int nb_kmer_in_buf = SIZE_BUFFER/nb_bytes_kmer;

    uint64_t kmers_read = 0;

    FILE* file;
    size_t return_fread;

    uint8_t* array_kmers = calloc(SIZE_BUFFER, sizeof(uint8_t));
    ASSERT_NULL_PTR(array_kmers,"queryBFT_kmerBranching_from_KmerFiles()")

    char* line = calloc(100, sizeof(char));
    ASSERT_NULL_PTR(line,"queryBFT_kmerBranching_from_KmerFiles()")

    // Skip nodes accelerate the neighborhood lookups performed by
    // isBranchingRight()/isBranchingLeft().
    root->skip_sp = build_skip_nodes(&(root->node));

    file = fopen(query_filename, "r");
    ASSERT_NULL_PTR(file,"queryBFT_kmerBranching_from_KmerFiles()")

    printf("\nQuerying BFT for branching k-mers in %s\n\n", query_filename);

    if (binary_file){

        // Two header lines precede the packed k-mers.
        if (fgets(line, 100, file) == NULL) ERROR("Cannot read header of the file")
        if (fgets(line, 100, file) == NULL) ERROR("Cannot read header of the file")

        while ((!ferror(file)) && (!feof(file))){

            return_fread = fread(array_kmers, (size_t)nb_bytes_kmer, (size_t)nb_kmer_in_buf, file);

            for (it_kmer = 0; it_kmer < (int)return_fread; it_kmer++){

                // Short-circuit: the left side is only inspected when the
                // right side is not branching (same call pattern as the
                // original if/else-if cascade).
                if ((isBranchingRight(&(root->node), root, lvl_root, &(array_kmers[it_kmer*nb_bytes_kmer]), root->k) > 1)
                    || (isBranchingLeft(&(root->node), root, lvl_root, &(array_kmers[it_kmer*nb_bytes_kmer]), root->k) > 1))
                    count_branching_node++;
            }

            if ((kmers_read%PRINT_EVERY_X_KMERS) > ((kmers_read+return_fread)%PRINT_EVERY_X_KMERS))
                printf("%" PRIu64 " kmers read\n", kmers_read+return_fread);

            kmers_read += return_fread;

            memset(array_kmers, 0, SIZE_BUFFER*sizeof(uint8_t));
        }
    }
    else{

        while (fgets(line, 100, file) != NULL){

            if (parseKmerCount(line, root->k, array_kmers, pos_buffer) == 1){

                pos_buffer += nb_bytes_kmer;
                nb_kmers_buffered++;

                if (nb_kmers_buffered == nb_kmer_in_buf){

                    for (it_kmer = 0; it_kmer < nb_kmer_in_buf; it_kmer++){

                        if ((isBranchingRight(&(root->node), root, lvl_root, &(array_kmers[it_kmer*nb_bytes_kmer]), root->k) > 1)
                            || (isBranchingLeft(&(root->node), root, lvl_root, &(array_kmers[it_kmer*nb_bytes_kmer]), root->k) > 1))
                            count_branching_node++;
                    }

                    nb_kmers_buffered = 0;
                    pos_buffer = 0;

                    memset(array_kmers, 0, SIZE_BUFFER*sizeof(uint8_t));

                    if ((kmers_read%PRINT_EVERY_X_KMERS) > ((kmers_read+nb_kmer_in_buf)%PRINT_EVERY_X_KMERS))
                        printf("%" PRIu64 " kmers read\n", kmers_read+nb_kmer_in_buf);

                    kmers_read += nb_kmer_in_buf;
                }
            }
        }

        // Flush the k-mers left in a partially filled buffer.
        for (it_kmer = 0; it_kmer < nb_kmers_buffered; it_kmer++){

            if ((isBranchingRight(&(root->node), root, lvl_root, &(array_kmers[it_kmer*nb_bytes_kmer]), root->k) > 1)
                || (isBranchingLeft(&(root->node), root, lvl_root, &(array_kmers[it_kmer*nb_bytes_kmer]), root->k) > 1))
                count_branching_node++;
        }

        memset(array_kmers, 0, SIZE_BUFFER*sizeof(uint8_t));
    }

    fclose(file);

    free(array_kmers);
    free(line);

    if (root->skip_sp != NULL) free_skip_nodes(&(root->node), root->skip_sp);

    gettimeofday(&tval_after, NULL);
    time_spent(&tval_before, &tval_after, &tval_result);

    printf("\nElapsed time: %ld.%06ld s\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec);
    printf("Peak of memory: %llu mb\n", ((unsigned long long int)getPeakRSS())/1024);
    printf("Current memory: %llu mb\n", ((unsigned long long int)getCurrentRSS())/1024);

    return count_branching_node;
}

/*void par_insert_Genomes_from_KmerFiles(int nb_files, char** filenames, int binary_files, int size_kmer, int treshold_compression, char* prefix_output, int cut_lvl, int memory_limit){ ASSERT_NULL_PTR(filenames,"insert_Genomes_from_KmerFiles()") struct timeval tval_before, tval_after, tval_result; Pvoid_t* PJArray; Word_t Rc_word; annotation_array_elem* comp_set_colors_tmp; BFT_Root** root; FILE** file; uint8_t** array_kmers; char** line; char** output_filename; char** output_filename2; char* str_tmp; char* output_filename3; int* nb_bfts_on_disk; int steps = 2; int i, j, k; int lvl_root; int nb_bytes_kmer; int nb_kmer_in_buf; int it_thread, 
thread_id; int len_longest_annot; int len_output_filename; int len_output_filename2; int nb_merging; int nb_threads; int size_id_genome; int it_bft_thread; int length_comp_set_colors_tmp; size_t return_fread; uint64_t kmers_read; //if (memory_limit > 0) omp_set_num_threads(1); //else memory_limit = INT_MAX; omp_set_num_threads(1); #pragma omp parallel \ shared(line, array_kmers, file, tval_before, tval_after, tval_result, PJArray, nb_threads, nb_bfts_on_disk, \ lvl_root, nb_bytes_kmer, nb_kmer_in_buf, root, output_filename, output_filename2, len_output_filename,) \ private(i, j, k, thread_id, Rc_word, comp_set_colors_tmp, str_tmp, size_id_genome, return_fread, it_thread, \ length_comp_set_colors_tmp, len_longest_annot, kmers_read, len_output_filename2) { #pragma omp single { gettimeofday(&tval_before, NULL); nb_threads = omp_get_num_threads(); length_comp_set_colors_tmp = 0; size_id_genome = 0; comp_set_colors_tmp = NULL; len_output_filename = strlen(prefix_output); nb_bfts_on_disk = calloc(nb_threads, sizeof(int)); ASSERT_NULL_PTR(nb_bfts_on_disk, "par_insert_Genomes_from_KmerFiles()\n") output_filename = malloc(nb_threads * sizeof(char*)); ASSERT_NULL_PTR(output_filename, "par_insert_Genomes_from_KmerFiles()\n") output_filename2 = malloc(nb_threads * sizeof(char*)); ASSERT_NULL_PTR(output_filename2, "par_insert_Genomes_from_KmerFiles()\n") output_filename3 = malloc((len_output_filename + 30) * sizeof(char)); ASSERT_NULL_PTR(output_filename3, "merging_BFT()\n"); strcpy(output_filename3, prefix_output); strcpy(&output_filename3[strlen(output_filename3)], "_tmp"); line = malloc(nb_threads * sizeof(char*)); ASSERT_NULL_PTR(line, "par_insert_Genomes_from_KmerFiles()\n") array_kmers = malloc(nb_threads * sizeof(uint8_t*)); ASSERT_NULL_PTR(array_kmers, "par_insert_Genomes_from_KmerFiles()\n") file = malloc(nb_threads * sizeof(FILE*)); ASSERT_NULL_PTR(file, "par_insert_Genomes_from_KmerFiles()\n") PJArray = malloc(nb_threads * sizeof(PWord_t)); ASSERT_NULL_PTR(PJArray, 
"par_insert_Genomes_from_KmerFiles()\n") root = malloc(nb_threads * sizeof(BFT_Root*)); ASSERT_NULL_PTR(root, "par_insert_Genomes_from_KmerFiles()\n") for (it_thread = 0; it_thread < nb_threads; it_thread++){ line[it_thread] = calloc(100, sizeof(char)); ASSERT_NULL_PTR(line[it_thread], "par_insert_Genomes_from_KmerFiles()\n") array_kmers[it_thread] = calloc(SIZE_BUFFER, sizeof(uint8_t)); ASSERT_NULL_PTR(array_kmers[it_thread], "par_insert_Genomes_from_KmerFiles()\n") output_filename[it_thread] = malloc((len_output_filename + 30) * sizeof(char)); ASSERT_NULL_PTR(output_filename[it_thread], "merging_BFT()\n"); output_filename2[it_thread] = malloc((len_output_filename + 30) * sizeof(char)); ASSERT_NULL_PTR(output_filename2[it_thread], "merging_BFT()\n"); strcpy(output_filename[it_thread], prefix_output); strcpy(output_filename2[it_thread], prefix_output); PJArray[it_thread] = (PWord_t)NULL; root[it_thread] = createBFT_Root(size_kmer, treshold_compression, 0); } lvl_root = (root[0]->k / NB_CHAR_SUF_PREF) - 1; nb_bytes_kmer = CEIL(root[0]->k * 2, SIZE_BITS_UINT_8T); nb_kmer_in_buf = SIZE_BUFFER/nb_bytes_kmer; } #pragma omp for for (i = 0; i < nb_files; i++){ //For each file in input thread_id = omp_get_thread_num(); kmers_read = 0; k = 0; j = 0; str_tmp = basename(filenames[i]); add_genomes_BFT_Root(1, &str_tmp, root[thread_id]); size_id_genome = get_nb_bytes_power2_annot(root[thread_id]->nb_genomes-1); file[thread_id] = fopen(filenames[i], "r"); ASSERT_NULL_PTR(file[thread_id],"insert_Genomes_from_KmerFiles()") printf("Processing file %s\n", filenames[i]); if (binary_files){ if (fgets(line[thread_id], 100, file[thread_id]) != NULL) k = atoi(line[thread_id]); else ERROR("Cannot read header of the file") if (fgets(line[thread_id], 100, file[thread_id]) == NULL) ERROR("Cannot read header of the file") while ((return_fread = fread(array_kmers[thread_id], nb_bytes_kmer, nb_kmer_in_buf, file[thread_id])) == nb_kmer_in_buf) { insertKmers(root[thread_id], 
array_kmers[thread_id], return_fread, root[thread_id]->nb_genomes-1, size_id_genome); memset(array_kmers[thread_id], 0, SIZE_BUFFER * sizeof(uint8_t)); if ((kmers_read%PRINT_EVERY_X_KMERS) > ((kmers_read+return_fread)%PRINT_EVERY_X_KMERS)){ // printf("%" PRIu64 " kmers read\n", kmers_read+return_fread); if (((unsigned long long int)getCurrentRSS())/1024 >= memory_limit){ sprintf(&(output_filename[thread_id][len_output_filename]), "%d", thread_id); len_output_filename2 = strlen(output_filename[thread_id]); output_filename[thread_id][len_output_filename2] = '_'; sprintf(&(output_filename[thread_id][len_output_filename2+1]), "%d", nb_bfts_on_disk[thread_id]); nb_bfts_on_disk[thread_id]++; write_BFT_Root_sparse(root[thread_id], output_filename[thread_id], false); freeBFT_Root(root[thread_id]); root[thread_id] = createBFT_Root(size_kmer, treshold_compression, 0); str_tmp = basename(filenames[i]); add_genomes_BFT_Root(1, &str_tmp, root[thread_id]); } } kmers_read += return_fread; return_fread = 0; } insertKmers(root[thread_id], array_kmers[thread_id], return_fread, root[thread_id]->nb_genomes-1, size_id_genome); } else { while (fgets(line[thread_id], 100, file[thread_id]) != NULL){ if (parseKmerCount(line[thread_id], root[thread_id]->k, array_kmers[thread_id], k) == 1){ k += nb_bytes_kmer; j++; if (j == nb_kmer_in_buf){ insertKmers(root[thread_id], array_kmers[thread_id], nb_kmer_in_buf, root[thread_id]->nb_genomes-1, size_id_genome); j = 0; k = 0; memset(array_kmers[thread_id], 0, SIZE_BUFFER * sizeof(uint8_t)); if ((kmers_read%PRINT_EVERY_X_KMERS) > ((kmers_read+nb_kmer_in_buf)%PRINT_EVERY_X_KMERS)){ //printf("%" PRIu64 " kmers read\n", kmers_read+nb_kmer_in_buf); if (((unsigned long long int)getCurrentRSS())/1024 >= memory_limit){ sprintf(&(output_filename[thread_id][len_output_filename]), "%d", thread_id); len_output_filename2 = strlen(output_filename[thread_id]); output_filename[thread_id][len_output_filename2] = '_'; 
sprintf(&(output_filename[thread_id][len_output_filename2+1]), "%d", nb_bfts_on_disk[thread_id]); nb_bfts_on_disk[thread_id]++; write_BFT_Root_sparse(root[thread_id], output_filename[thread_id], false); freeBFT_Root(root[thread_id]); root[thread_id] = createBFT_Root(size_kmer, treshold_compression, 0); str_tmp = basename(filenames[i]); add_genomes_BFT_Root(1, &str_tmp, root[thread_id]); } } kmers_read += nb_kmer_in_buf; } } } insertKmers(root[thread_id], array_kmers[thread_id], j, root[thread_id]->nb_genomes-1, size_id_genome); kmers_read += j; memset(array_kmers[thread_id], 0, SIZE_BUFFER * sizeof(uint8_t)); } fclose(file[thread_id]); if (root[thread_id]->treshold_compression != 0){ if ((root[thread_id]->nb_genomes-1 > 5) && ((root[thread_id]->nb_genomes-1)%root[thread_id]->treshold_compression == 0)){ memory_Used* mem = printMemoryUsedFromNode(&(root[thread_id]->node), lvl_root, root[thread_id]->k, root[thread_id]->info_per_lvl); len_longest_annot = MAX(mem->size_biggest_annot+1, getMaxSize_annotation_array_elem(root[thread_id]->comp_set_colors)); free(mem); load_annotation_from_Node(&(root[thread_id]->node), lvl_root, root[thread_id]->k, len_longest_annot, root[thread_id]->info_per_lvl, &(PJArray[thread_id]), root[thread_id]->comp_set_colors, root[thread_id]->ann_inf); comp_set_colors_tmp = root[thread_id]->comp_set_colors; length_comp_set_colors_tmp = root[thread_id]->length_comp_set_colors; root[thread_id]->comp_set_colors = sort_annotations(&(PJArray[thread_id]), &(root[thread_id]->length_comp_set_colors), len_longest_annot); if (root[thread_id]->comp_set_colors != NULL){ compress_annotation_from_Node(&(root[thread_id]->node), lvl_root, root[thread_id]->k, root[thread_id]->info_per_lvl, &(PJArray[thread_id]), comp_set_colors_tmp, root[thread_id]->ann_inf); free_annotation_array_elem(&comp_set_colors_tmp, &length_comp_set_colors_tmp); } #if defined (_WORDx86) Word_t * PValue; uint8_t* it_index = calloc((len_longest_annot + CEIL(len_longest_annot, 
SIZE_BITS_UINT_8T) + 4), sizeof(uint8_t)); ASSERT_NULL_PTR(it_index, "sort_annotations()"); JSLF(PValue, PJArray[thread_id], it_index); while (PValue != NULL){ free(*PValue); JSLN(PValue, PJArray[thread_id], it_index); } free(it_index); #endif JSLFA(Rc_word, PJArray[thread_id]); } } gettimeofday(&tval_after, NULL); time_spent(&tval_before, &tval_after, &tval_result); printf("Total elapsed time: %ld.%06ld s\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec); printf("Peak of memory: %llu mb\n", ((unsigned long long int)getPeakRSS())/1024); printf("Current memory: %llu mb\n", ((unsigned long long int)getCurrentRSS())/1024); } #pragma omp for for (it_thread = 0; it_thread < nb_threads; it_thread++){ sprintf(&(output_filename[it_thread][len_output_filename]), "%d", it_thread); len_output_filename2 = strlen(output_filename[it_thread]); output_filename[it_thread][len_output_filename2] = '_'; sprintf(&(output_filename[it_thread][len_output_filename2+1]), "%d", nb_bfts_on_disk[it_thread]); nb_bfts_on_disk[it_thread]++; write_BFT_Root_sparse(root[it_thread], output_filename[it_thread], false); free(line[it_thread]); free(array_kmers[it_thread]); freeBFT_Root(root[it_thread]); } } free(PJArray); free(file); free(array_kmers); free(line); free(root); omp_set_nested(false); omp_set_num_threads(1); if (omp_get_num_threads() == 1){ strcpy(&output_filename[0][len_output_filename], "0_0"); strcpy(&output_filename2[0][len_output_filename], "0_0_pkd"); read_cut_BFT_Root(output_filename[0], output_filename2[0], cut_lvl, true); strcpy(&output_filename[0][len_output_filename], "0_0_pkd"); for (it_thread = 0; it_thread < nb_threads; it_thread++){ sprintf(&(output_filename2[it_thread][len_output_filename]), "%d", it_thread); len_output_filename2 = strlen(output_filename2[it_thread]); output_filename2[it_thread][len_output_filename2] = '_'; for (it_bft_thread = 0; it_bft_thread < nb_bfts_on_disk[it_thread]; it_bft_thread++){ if (it_thread || it_bft_thread){ 
sprintf(&(output_filename2[it_thread][len_output_filename2+1]), "%d", it_bft_thread); read_cut_BFT_Root(output_filename2[it_thread], output_filename3, cut_lvl, true); printf("%s - %s\n", output_filename[0], output_filename2[it_thread]); merging_BFT(output_filename[0], output_filename3, output_filename[0], cut_lvl, true); } } gettimeofday(&tval_after, NULL); time_spent(&tval_before, &tval_after, &tval_result); printf("Total elapsed time: %ld.%06ld s\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec); printf("Peak of memory: %llu mb\n", ((unsigned long long int)getPeakRSS())/1024); printf("Current memory: %llu mb\n", ((unsigned long long int)getCurrentRSS())/1024); } free(nb_bfts_on_disk); } else{ #pragma omp parallel \ shared(output_filename, output_filename2, len_output_filename, nb_bfts_on_disk, tval_before, tval_after, tval_result) \ private(it_thread, it_bft_thread, nb_merging, steps, len_output_filename2) { #pragma omp for for (it_thread = 0; it_thread < nb_threads; it_thread++){ steps = 2; nb_merging = nb_bfts_on_disk[it_thread] - 1; sprintf(&(output_filename[it_thread][len_output_filename]), "%d", it_thread); sprintf(&(output_filename2[it_thread][len_output_filename]), "%d", it_thread); len_output_filename2 = strlen(output_filename[it_thread]); output_filename[it_thread][len_output_filename2] = '_'; output_filename2[it_thread][len_output_filename2] = '_'; while (nb_merging > 0){ for (it_bft_thread = 0; it_bft_thread + steps/2 < nb_bfts_on_disk[it_thread]; it_bft_thread += steps){ sprintf(&(output_filename[it_thread][len_output_filename2+1]), "%d", it_bft_thread); sprintf(&(output_filename2[it_thread][len_output_filename2+1]), "%d", it_bft_thread + steps/2); printf("%s - %s\n", output_filename[it_thread], output_filename2[it_thread]); merging_BFT(output_filename[it_thread], output_filename2[it_thread], output_filename[it_thread], cut_lvl, false); nb_merging--; } steps *= 2; } gettimeofday(&tval_after, NULL); time_spent(&tval_before, &tval_after, 
&tval_result); printf("Total elapsed time: %ld.%06ld s\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec); printf("Peak of memory: %llu mb\n", ((unsigned long long int)getPeakRSS())/1024); printf("Current memory: %llu mb\n", ((unsigned long long int)getCurrentRSS())/1024); } } free(nb_bfts_on_disk); steps = 2; nb_merging = nb_threads - 1; while (nb_merging > 0){ #pragma omp parallel shared(nb_merging, steps, output_filename, output_filename2) private(it_thread, len_output_filename2) { #pragma omp for for (it_thread = 0; it_thread < nb_threads; it_thread += steps){ if (it_thread + steps/2 < nb_threads){ sprintf(&(output_filename[it_thread][len_output_filename]), "%d", it_thread); sprintf(&(output_filename2[it_thread][len_output_filename]), "%d", it_thread + steps/2); len_output_filename2 = strlen(output_filename[it_thread]); strcpy(&output_filename[it_thread][len_output_filename2], "_0"); strcpy(&output_filename2[it_thread][len_output_filename2], "_0"); merging_BFT(output_filename[it_thread], output_filename2[it_thread], output_filename[it_thread], cut_lvl, false); nb_merging--; } } } steps *= 2; gettimeofday(&tval_after, NULL); time_spent(&tval_before, &tval_after, &tval_result); printf("Total elapsed time: %ld.%06ld s\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec); printf("Peak of memory: %llu mb\n", ((unsigned long long int)getPeakRSS())/1024); printf("Current memory: %llu mb\n", ((unsigned long long int)getCurrentRSS())/1024); } } return; }*/ void query_sequences_outputCSV(BFT_Root* root, char* query_filename, char* output_filename, double threshold, bool canonical_search){ ASSERT_NULL_PTR(root, "query_sequences_outputCSV()\n") ASSERT_NULL_PTR(query_filename, "query_sequences_outputCSV()\n") ASSERT_NULL_PTR(output_filename, "query_sequences_outputCSV()\n"); struct timeval tval_before, tval_after, tval_result; gettimeofday(&tval_before, NULL); if (threshold <= 0) ERROR("query_sequences_outputCSV(): the threshold must be superior to 
0.\n"); if (threshold > 1) ERROR("query_sequences_outputCSV(): the threshold must be inferior or equal to 1.\n"); const char nl = '\n'; const char eol = '\0'; const char csv_sep = ','; const char not_present = '0'; const char present = '1'; uint64_t nb_queries = 0; size_t size_buffer_queries = SIZE_BUFFER; uint32_t i, it_annot, it_csv_line_res; uint32_t* ids_present; char* buffer_queries = calloc(size_buffer_queries, sizeof(char)); ASSERT_NULL_PTR(buffer_queries,"query_sequences_outputCSV()\n"); char* csv_line_res = calloc(root->nb_genomes * 2, sizeof(char)); ASSERT_NULL_PTR(csv_line_res,"query_sequences_outputCSV()\n"); FILE* file_query = fopen(query_filename, "r"); ASSERT_NULL_PTR(file_query,"query_sequences_outputCSV()\n") FILE* file_output = fopen(output_filename, "w"); ASSERT_NULL_PTR(file_output,"query_sequences_outputCSV()\n") //prepare_shuffling_dictionary(); for (i = 0; i < root->nb_genomes - 1; i++){ fwrite(root->filenames[i], sizeof(char), strlen(root->filenames[i]), file_output); fwrite(&csv_sep, sizeof(char), 1, file_output); csv_line_res[i * 2 + 1] = csv_sep; } csv_line_res[root->nb_genomes * 2 - 1] = nl; fwrite(root->filenames[i], sizeof(char), strlen(root->filenames[i]), file_output); if (fwrite(&nl, sizeof(char), 1, file_output) != 1) ERROR("query_sequences_outputCSV(): could not write output to CSV file.\n"); while (getline(&buffer_queries, &size_buffer_queries, file_query) != -1){ buffer_queries[strcspn(buffer_queries, "\r\n")] = '\0'; it_csv_line_res = 0; ids_present = query_sequence(root, buffer_queries, threshold, canonical_search); if ((ids_present != NULL) && ids_present[0]){ for (it_annot = 1; it_annot <= ids_present[0]; it_annot++){ for (i = 0; i < ids_present[it_annot] - (it_annot == 1 ? 
0 : ids_present[it_annot - 1] + 1); i++, it_csv_line_res += 2) csv_line_res[it_csv_line_res] = not_present; csv_line_res[it_csv_line_res] = present; it_csv_line_res += 2; } for (it_annot = ids_present[ids_present[0]] + 1; it_annot < root->nb_genomes; it_annot++, it_csv_line_res += 2) csv_line_res[it_csv_line_res] = not_present; } else{ for (it_annot = 0; it_annot < root->nb_genomes; it_annot++, it_csv_line_res += 2) csv_line_res[it_csv_line_res] = not_present; } if (fwrite(csv_line_res, sizeof(char), root->nb_genomes * 2, file_output) != root->nb_genomes * 2) ERROR("query_sequences_outputCSV(): could not write output to CSV file.\n"); if (ids_present != NULL) free(ids_present); nb_queries++; } fseek(file_output, 0 - ((long int) sizeof(char)), SEEK_CUR); if (fwrite(&eol, sizeof(char), 1, file_output) != 1) ERROR("query_sequences_outputCSV(): could not write output to CSV file.\n"); free(csv_line_res); free(buffer_queries); fclose(file_query); fclose(file_output); gettimeofday(&tval_after, NULL); time_spent(&tval_before, &tval_after, &tval_result); printf("\nFile %s has been processed.\n", query_filename); printf("Elapsed time: %ld.%06ld s\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec); printf("Peak of memory: %llu mb\n", ((unsigned long long int)getPeakRSS())/1024); printf("Current memory: %llu mb\n", ((unsigned long long int)getCurrentRSS())/1024); return; }
GB_binop__div_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__div_uint16 // A.*B function (eWiseMult): GB_AemultB__div_uint16 // A*D function (colscale): GB_AxD__div_uint16 // D*A function (rowscale): GB_DxB__div_uint16 // C+=B function (dense accum): GB_Cdense_accumB__div_uint16 // C+=b function (dense accum): GB_Cdense_accumb__div_uint16 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__div_uint16 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__div_uint16 // C=scalar+B GB_bind1st__div_uint16 // C=scalar+B' GB_bind1st_tran__div_uint16 // C=A+scalar GB_bind2nd__div_uint16 // C=A'+scalar GB_bind2nd_tran__div_uint16 // C type: uint16_t // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 16) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint16_t bij = Bx [pB] // declare scalar of the same type as C 
#define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_IDIV_UNSIGNED (x, y, 16) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_DIV || GxB_NO_UINT16 || GxB_NO_DIV_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
//------------------------------------------------------------------------------
// GB_binop__div_uint16: auto-generated kernels for the DIV binary operator,
// specialized for uint16_t operands.  NOTE(review): the macro prolog for this
// translation unit (GB_ATYPE, GB_BTYPE, GB_CTYPE, GB_BINOP, GB_DISABLE, ...)
// is defined earlier in this file, above this chunk; every template #include
// below expands against those macros.  Do not reorder the #undef/#define
// directives — the templates are sensitive to the exact macro state.
//------------------------------------------------------------------------------

// C += A+B, where C, A, and B are all dense matrices (no mask, no typecast).
// The entire body is supplied by the shared template file.
void GB_Cdense_ewise3_accum__div_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Returns GrB_NO_VALUE when this specialized kernel is compiled out
// (GB_DISABLE), so the caller can fall back to the generic kernel.
GrB_Info GB_Cdense_ewise3_noaccum__div_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// The slice arrays (kfirst/klast/pstart) describe how B has been partitioned
// into ntasks tasks for parallel accumulation.
GrB_Info GB_Cdense_accumB__div_uint16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__div_uint16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return, retained verbatim from the
    // code generator's output.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// A_is_pattern / D_is_pattern indicate the values of that operand are not
// needed by the multiply operator (structure only).
GrB_Info GB_AxD__div_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__div_uint16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Frees the ek_slice workspace allocated by the add/emult templates below.
#undef GB_FREE_ALL
#define GB_FREE_ALL                                                         \
{                                                                           \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;      \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;      \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;      \
}

// C_to_M / C_to_A / C_to_B map vectors of C to vectors of M, A, and B;
// TaskList describes the parallel task decomposition of C.
GrB_Info GB_AaddB__div_uint16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace pointers released by GB_FREE_ALL (also used on error paths
    // inside the template)
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__div_uint16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = x / Bx [p] for every entry present in B (Bb is the bitmap; a NULL
// Bb means all entries are present — see the GBB macro).
GrB_Info GB_bind1st__div_uint16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = Bx [p] ;
        // GB_IDIV_UNSIGNED: integer division with GraphBLAS divide-by-zero
        // semantics for a 16-bit unsigned type
        Cx [p] = GB_IDIV_UNSIGNED (x, bij, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = Ax [p] / y for every entry present in A.
GrB_Info GB_bind2nd__div_uint16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = Ax [p] ;
        Cx [p] = GB_IDIV_UNSIGNED (aij, y, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = Ax [pA] ;                    \
    Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 16) ;   \
}

GrB_Info GB_bind1st_tran__div_uint16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = Ax [pA] ;                    \
    Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 16) ;   \
}

GrB_Info GB_bind2nd_tran__div_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

// closes the compilation guard opened earlier in this file (above this chunk)
#endif
smoothing_utility.h
/* ============================================================================== KratosStructuralApplication A library based on: Kratos A General Purpose Software for Multi-Physics Finite Element Analysis Version 1.0 (Released on march 05, 2007). Copyright 2007 Pooyan Dadvand, Riccardo Rossi, Janosch Stascheit, Felix Nagel pooyan@cimne.upc.edu rrossi@cimne.upc.edu janosch.stascheit@rub.de nagel@sd.rub.de - CIMNE (International Center for Numerical Methods in Engineering), Gran Capita' s/n, 08034 Barcelona, Spain - Ruhr-University Bochum, Institute for Structural Mechanics, Germany Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following condition: Distribution of this code for any commercial purpose is permissible ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
============================================================================== */ /* ********************************************************* * * Last Modified by: $Author: Nelson Lafontaine $ * Date: $Date: 01-27-2010$ * Revision: $Revision: 1.00 $ * * ***********************************************************/ #if !defined(SMOOTHING_UTILITY) #define SMOOTHING_UTILITY #ifdef _OPENMP #include <omp.h> #endif #include "boost/smart_ptr.hpp" #include <boost/timer.hpp> #include <boost/numeric/ublas/matrix.hpp> #include <boost/numeric/ublas/vector.hpp> #include <boost/numeric/ublas/banded.hpp> #include <boost/numeric/ublas/matrix_sparse.hpp> #include <boost/numeric/ublas/triangular.hpp> #include <boost/numeric/ublas/operation.hpp> #include <boost/numeric/ublas/lu.hpp> /* Project includes */ #include "includes/define.h" #include "includes/model_part.h" #include "includes/node.h" #include "includes/variables.h" #include "containers/array_1d.h" #include "processes/find_nodal_neighbours_process.h" #include "processes/find_elements_neighbours_process.h" #include "structural_application.h" #include "utilities/math_utils.h" #include "custom_utilities/sd_math_utils.h" #include <cmath> #include <algorithm> namespace Kratos { enum polynomial_degree {Linear = 1, Cuadratic, Cubic, Cuartic}; class Smoothing_Utility { public: typedef ModelPart::NodesContainerType NodesArrayType; typedef ModelPart::ElementsContainerType ElementsArrayType; typedef ModelPart::ConditionsContainerType ConditionsArrayType; typedef boost::numeric::ublas::vector<Matrix> Matrix_Order_Tensor; typedef boost::numeric::ublas::vector<Vector> Vector_Order_Tensor; Smoothing_Utility( ModelPart& model_part, int domain_size ) : mr_model_part( model_part ) { mdomain_size = domain_size; minitialize_Setting_Variables = false; } ~Smoothing_Utility() {} inline void CreatePartition( unsigned int number_of_threads, const int number_of_rows, vector<unsigned int>& partitions ) { partitions.resize( number_of_threads + 1 ); int 
partition_size = number_of_rows / number_of_threads; partitions[0] = 0; partitions[number_of_threads] = number_of_rows; for ( unsigned int i = 1; i < number_of_threads; i++ ) partitions[i] = partitions[i-1] + partition_size ; } // it computes the triburary volume or area of each nodes. // only its valid for triangle and tetrahedra void CalculatetributaryFactor ( ModelPart& this_model_part, const unsigned int& domain_size ) { ElementsArrayType& pElements = this_model_part.Elements(); #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif vector<unsigned int> element_partition; CreatePartition( number_of_threads, pElements.size(), element_partition ); switch(domain_size) { case 2: { const double fact = 1.00/3.00; #pragma omp parallel for for ( int k = 0; k < number_of_threads; k++ ) { ElementsArrayType::iterator it_begin = pElements.ptr_begin() + element_partition[k]; ElementsArrayType::iterator it_end = pElements.ptr_begin() + element_partition[k+1]; for ( ElementsArrayType::iterator it = it_begin; it != it_end; ++it ) { Element::GeometryType& geom = it->GetGeometry(); for ( unsigned int i = 0; i < geom.size(); i++ ) { geom[i].SetLock(); double& Nodal_Area = geom[i].GetValue(NODAL_AREA); Nodal_Area += fact * geom.Area(); geom[i].UnSetLock(); } } } break; } ///******************************************************************************************** case 3: { const double fact = 0.25; #pragma omp parallel for for ( int k = 0; k < number_of_threads; k++ ) { ElementsArrayType::iterator it_begin = pElements.ptr_begin() + element_partition[k]; ElementsArrayType::iterator it_end = pElements.ptr_begin() + element_partition[k+1]; for ( ElementsArrayType::iterator it = it_begin; it != it_end; ++it ) { Element::GeometryType& geom = it->GetGeometry(); for ( unsigned int i = 0; i < geom.size(); i++ ) { geom[i].SetLock(); double& Nodal_Volume = geom[i].GetValue(NODAL_VOLUME); Nodal_Volume += fact * geom.Volume(); 
geom[i].UnSetLock(); } } } break; } } } template<class TVariableType> void WeightedRecoveryGradients(const Variable<TVariableType>& rVariable, Variable<TVariableType>& rVariable_Smooth, ModelPart& this_model_part, const unsigned int& domain_size) { std::cout<< "SMOTHING SOLUTIONS VARIABLE = " << rVariable << std::endl; if( minitialize_Setting_Variables==false) { SettingNodalValues(this_model_part, domain_size); CalculatetributaryFactor(this_model_part, domain_size); minitialize_Setting_Variables = true; } switch(domain_size) { case 2: { WeightedRecoveryGradients2D(rVariable, rVariable_Smooth, this_model_part); break; } case 3: { WeightedRecoveryGradients3D(rVariable, rVariable_Smooth, this_model_part); break; } } } ///WARNING = Only valid for triangle elements template<class TVariableType> void WeightedRecoveryGradients2D(const Variable<TVariableType>& rVariable, const Variable<TVariableType>& rVariable_Smooth, ModelPart& this_model_part) { KRATOS_TRY ProcessInfo& CurrentProcessInfo = this_model_part.GetProcessInfo(); ElementsArrayType& pElements = this_model_part.Elements(); NodesArrayType& pNodes = this_model_part.Nodes(); #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif vector<unsigned int> element_partition; CreatePartition( number_of_threads, pElements.size(), element_partition ); vector<unsigned int> node_partition; CreatePartition( number_of_threads, pNodes.size(), node_partition ); const double fact = 1.00/3.00; std::vector<TVariableType> Variable_Value; #pragma omp parallel for private(Variable_Value) for ( int k = 0; k < number_of_threads; k++ ) { ElementsArrayType::iterator it_begin = pElements.ptr_begin() + element_partition[k]; ElementsArrayType::iterator it_end = pElements.ptr_begin() + element_partition[k+1]; for ( ElementsArrayType::iterator it = it_begin; it != it_end; ++it ) { Element::GeometryType& geom = it->GetGeometry(); it->GetValueOnIntegrationPoints(rVariable, Variable_Value, 
CurrentProcessInfo ); for ( unsigned int i = 0; i < geom.size(); i++ ) { geom[i].SetLock(); TVariableType& Nodal_Values = geom(i)->GetValue(rVariable_Smooth); Nodal_Values += fact * geom.Area() * Variable_Value[0]; ///un por integrations points // if(geom(i)->Id()==496){ // KRATOS_WATCH(it->Id()) // KRATOS_WATCH(fact) // KRATOS_WATCH(geom.Area()) // KRATOS_WATCH(Variable_Value[0]) // KRATOS_WATCH(Nodal_Values) // } geom[i].UnSetLock(); } } } #pragma omp parallel for for ( int k = 0; k < number_of_threads; k++ ) { NodesArrayType::iterator i_begin = pNodes.ptr_begin() + node_partition[k]; NodesArrayType::iterator i_end = pNodes.ptr_begin() + node_partition[k+1]; for (ModelPart::NodeIterator i = i_begin; i != i_end; ++i ) { double& Area_Total = i->GetValue(NODAL_AREA ); TVariableType& Nodal_Values = i->GetValue(rVariable_Smooth); Nodal_Values = (1.00 / Area_Total) * Nodal_Values; // if(i->Id()==496){ // KRATOS_WATCH(Area_Total) // KRATOS_WATCH(Nodal_Values) // } } } KRATOS_CATCH( "" ) } // Only valid for tetrahedra elements // WARNING = NODAL_AREA SHOULD BE NODAL_VOLUME template<class TVariableType> void WeightedRecoveryGradients3D(const Variable<TVariableType>& rVariable, const Variable<TVariableType>& rVariable_Smooth, ModelPart& this_model_part) { KRATOS_TRY ProcessInfo& CurrentProcessInfo = this_model_part.GetProcessInfo(); ElementsArrayType& pElements = this_model_part.Elements(); NodesArrayType& pNodes = this_model_part.Nodes(); #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif vector<unsigned int> element_partition; CreatePartition( number_of_threads, pElements.size(), element_partition ); const double fact = 0.25; std::vector<TVariableType> Variable_Value; #pragma omp parallel for private(Variable_Value) for ( int k = 0; k < number_of_threads; k++ ) { ElementsArrayType::iterator it_begin = pElements.ptr_begin() + element_partition[k]; ElementsArrayType::iterator it_end = pElements.ptr_begin() + 
element_partition[k+1]; for ( ElementsArrayType::iterator it = it_begin; it != it_end; ++it ) { Element::GeometryType& geom = it->GetGeometry(); it->GetValueOnIntegrationPoints(rVariable, Variable_Value, CurrentProcessInfo ); for ( unsigned int i = 0; i < geom.size(); i++ ) { geom[i].SetLock(); geom[i].GetValue(rVariable_Smooth) += fact * geom.Volume() * Variable_Value[0]; geom[i].UnSetLock(); } } } vector<unsigned int> node_partition; CreatePartition( number_of_threads, pNodes.size(), node_partition ); #pragma omp parallel for for ( int k = 0; k < number_of_threads; k++ ) { NodesArrayType::iterator i_begin = pNodes.ptr_begin() + node_partition[k]; NodesArrayType::iterator i_end = pNodes.ptr_begin() + node_partition[k+1]; for ( ModelPart::NodeIterator i = i_begin; i != i_end; ++i ) { double& Vol_Total = i->GetValue(NODAL_VOLUME); TVariableType& Nodal_Values = i->GetValue(rVariable_Smooth); Nodal_Values = (1.00 / Vol_Total) * Nodal_Values; } } KRATOS_CATCH( "" ) } template<class TVariableType> void InterpolatedRecoveryGradients(const Variable<TVariableType>& rVariable, Variable<TVariableType>& rVariable_Smooth, ModelPart& this_model_part, const unsigned int& domain_size) { KRATOS_TRY NodesArrayType& pNodes = this_model_part.Nodes(); ProcessInfo& CurrentProcessInfo = this_model_part.GetProcessInfo(); #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif vector<unsigned int> node_partition; CreatePartition( number_of_threads, pNodes.size(), node_partition ); // Variables Globales std::vector<TVariableType> Output_Values; array_1d<double, 3> Coord_Point = ZeroVector(3); array_1d<double, 3> Coord_Node = ZeroVector(3); Vector Polynomial; unsigned int size_2 = 3; if ( domain_size == 3) size_2 = 6; bool init = false; Matrix Value; Matrix_Order_Tensor Aux_b; Vector_Order_Tensor Aux_Poly; Vector_Order_Tensor a; std::vector<int> work_array_elem; for ( ModelPart::NodesContainerType::iterator i = this_model_part.NodesBegin(); i != 
this_model_part.NodesEnd(); i++ ) { WeakPointerVector< Element > rneigh_el = i->GetValue( NEIGHBOUR_ELEMENTS ); // elementos vecinos al nodo unsigned int num_of_elem_layer_one = std::distance(rneigh_el.begin(), rneigh_el.end()); // Realizaremos una interpolacion cuadratica. En caso especial, donde el numero de elementos // vecinos sean menor que 10 usaremos como ultima intancia una aproximacion lineal. if ( num_of_elem_layer_one < 13 ) { Compute_First_and_Second_Neighbour(i, work_array_elem ); num_of_elem_layer_one = work_array_elem.size(); Aux_b.resize( num_of_elem_layer_one ); Aux_Poly.resize( num_of_elem_layer_one ); for ( unsigned int i_elem = 0; i_elem < work_array_elem.size(); i_elem ++ ) { unsigned int elem = work_array_elem[i_elem]; Element::GeometryType& geom = ( this_model_part.Elements()[elem] ).GetGeometry(); Find_Coord_Gauss_Points( geom , Coord_Point ); CalculatePolynomialInterpolation( Polynomial, Coord_Point, num_of_elem_layer_one ); unsigned int size_Pol = Polynomial.size(); if ( init == false ) { for ( unsigned k = 0; k < num_of_elem_layer_one; k++ ) { Aux_Poly[k].resize( size_Pol ); Aux_b[k].resize( 1, size_2 ); } } (this_model_part.Elements()[elem]).GetValueOnIntegrationPoints(rVariable, Output_Values, CurrentProcessInfo ); noalias( Aux_b[i_elem] ) = Output_Values[0]; noalias( Aux_Poly[i_elem] ) = Polynomial; init = true; } } // Interpolacion Cuadratica else { unsigned int counter = 0; Aux_b.resize( num_of_elem_layer_one ); Aux_Poly.resize( num_of_elem_layer_one ); for ( WeakPointerVector<Element>::iterator it = rneigh_el.begin(); it != rneigh_el.end(); it++ ) { Element::GeometryType& geom = it->GetGeometry(); Find_Coord_Gauss_Points( geom , Coord_Point ); CalculatePolynomialInterpolation( Polynomial, Coord_Point, num_of_elem_layer_one ); unsigned int size_Pol = Polynomial.size(); if ( init == false ) { for ( unsigned k = 0; k < num_of_elem_layer_one; k++ ) { Aux_Poly[k].resize( size_Pol ); Aux_b[k].resize( 1, size_2 ); } } 
it->GetValueOnIntegrationPoints(rVariable, Output_Values, CurrentProcessInfo ); noalias( Aux_b[counter] ) = Output_Values[0]; noalias( Aux_Poly[counter] ) = Polynomial; init = true; counter++; } } Solve( Aux_b, Aux_Poly, a ); Coord_Node[0] = this_model_part.Nodes()[i->Id()].X(); Coord_Node[1] = this_model_part.Nodes()[i->Id()].Y(); Coord_Node[2] = this_model_part.Nodes()[i->Id()].Z(); ///computing average Compute_Interpolated_Sigma(Coord_Node, a, num_of_elem_layer_one, Value); i->GetValue(rVariable_Smooth) = Value; work_array_elem.clear(); init = false; } KRATOS_CATCH( "" ) } //WARNING = Solo para dos Dimesiones void CalculatePolynomialInterpolation( Vector& P, array_1d<double, 3>& Coord_Point, const unsigned int degree ) { unsigned int dim = 0; unsigned int degree_aux = 0; // cantidad de puntos disponibles if ( degree >= 3 && degree < 15 ) // Linear { degree_aux = 1; } else if ( degree >= 15 && degree < 40 ) // Cuadratic { degree_aux = 2; } else if ( degree >= 40 ) // Cubic { degree_aux = 3; } else if(degree>80) // Cuartic { degree_aux=4; } else { std::cout << "Warning: No se puede realizar la interpolacion deseada" << std::endl; } mpolynomial_degree = static_cast<polynomial_degree>( degree_aux ); switch ( mpolynomial_degree ) { case Linear: { dim = 3; P.resize( dim ); P[0] = 1.00; P[1] = Coord_Point[0]; P[2] = Coord_Point[1]; break; } case Cuadratic: { dim = 6; P.resize( dim ); P[0] = 1.00; P[1] = Coord_Point[0]; P[2] = Coord_Point[1]; P[3] = Coord_Point[0] * Coord_Point[1]; P[4] = Coord_Point[0] * Coord_Point[0]; P[5] = Coord_Point[1] * Coord_Point[1]; break; } case Cubic: { dim = 10; P.resize( dim ); P[0] = 1.00; P[1] = Coord_Point[0]; P[2] = Coord_Point[1]; P[3] = Coord_Point[0] * Coord_Point[1]; P[4] = Coord_Point[0] * Coord_Point[0]; P[5] = Coord_Point[1] * Coord_Point[1]; P[6] = Coord_Point[0] * Coord_Point[0] * Coord_Point[1]; P[7] = Coord_Point[1] * Coord_Point[1] * Coord_Point[0]; P[8] = Coord_Point[0] * Coord_Point[0] * Coord_Point[0]; P[9] = 
Coord_Point[1] * Coord_Point[1] * Coord_Point[1]; break; } case Cuartic: { dim = 15; P.resize( dim ); P[0] = 1.00; P[1] = Coord_Point[0]; P[2] = Coord_Point[1]; P[3] = Coord_Point[0] * Coord_Point[1]; P[4] = Coord_Point[0] * Coord_Point[0]; P[5] = Coord_Point[1] * Coord_Point[1]; P[6] = Coord_Point[0] * Coord_Point[0] * Coord_Point[1]; P[7] = Coord_Point[1] * Coord_Point[1] * Coord_Point[0]; P[8] = Coord_Point[0] * Coord_Point[0] * Coord_Point[0]; P[9] = Coord_Point[1] * Coord_Point[1] * Coord_Point[1]; P[10] = Coord_Point[0] * Coord_Point[0] * Coord_Point[1] * Coord_Point[1]; P[11] = Coord_Point[0] * Coord_Point[0] * Coord_Point[0] * Coord_Point[1]; P[12] = Coord_Point[1] * Coord_Point[1] * Coord_Point[1] * Coord_Point[0]; P[13] = Coord_Point[0] * Coord_Point[0] * Coord_Point[0] * Coord_Point[0]; P[14] = Coord_Point[1] * Coord_Point[1] * Coord_Point[1] * Coord_Point[1]; break; } default: { std::cout << "WARNING: CASE NOT VALID" << std::endl; } } } ///WARNING = Valid for tethaedra and triangle only void Find_Coord_Gauss_Points( Element::GeometryType& geom, array_1d<double, 3>& Coord_Point ) { double x = 0.00; double y = 0.00; double z = 0.00; double fact = 1.00/3.00; if ( geom.size() == 4 ) { fact = 0.25; } Coord_Point = ZeroVector( 3 ); for ( unsigned int i = 0; i < geom.size(); i++ ) { x = geom[i].X(); y = geom[i].Y(); z = geom[i].Z(); Coord_Point[0] += x; Coord_Point[1] += y; Coord_Point[2] += z; } noalias( Coord_Point ) = Coord_Point * fact; } void Compute_First_and_Second_Neighbour( ModelPart::NodesContainerType::iterator& this_node, std::vector<int>& work_array ) { KRATOS_TRY work_array.reserve( 1000 ); WeakPointerVector< Node<3> >& neighb_nodes = this_node->GetValue( NEIGHBOUR_NODES ); WeakPointerVector< Element >& neighb_elems = this_node->GetValue( NEIGHBOUR_ELEMENTS ); //filling the first neighbours list for ( WeakPointerVector<Element>::iterator i = neighb_elems.begin(); i != neighb_elems.end(); i++ ) { int index_j = i->Id(); work_array.push_back( 
index_j ); } //adding the second neighbours for ( WeakPointerVector< Node<3> >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++ ) { WeakPointerVector< Element > second_neighb_elems = i->GetValue( NEIGHBOUR_ELEMENTS ); for ( WeakPointerVector<Element>::iterator j = second_neighb_elems.begin(); j != second_neighb_elems.end(); j++ ) { int second_neighb_index = j->Id(); work_array.push_back( second_neighb_index ); } } //sorting the indices and elminating the duplicates std::sort( work_array.begin(), work_array.end() ); std::vector<int>::iterator new_end = std::unique( work_array.begin(), work_array.end() ); unsigned int number_of_entries = new_end - work_array.begin(); work_array.resize( number_of_entries, false ); KRATOS_CATCH( "" ) } void SettingNodalValues( ModelPart& this_model_part, const unsigned int& domain_size ) { NodesArrayType& pNodes = this_model_part.Nodes(); unsigned int size = 3; if (domain_size == 3 ) size = 6; const Matrix Nodal_Values = ZeroMatrix(1, size ); #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif vector<unsigned int> node_partition; CreatePartition( number_of_threads, pNodes.size(), node_partition ); switch(domain_size) { case 2: { #pragma omp parallel for for ( int k = 0; k < number_of_threads; k++ ) { NodesArrayType::iterator i_begin = pNodes.ptr_begin() + node_partition[k]; NodesArrayType::iterator i_end = pNodes.ptr_begin() + node_partition[k+1]; for ( ModelPart::NodeIterator i = i_begin; i != i_end; ++i ) { { i->GetValue( NODAL_AREA ) = 0.00; i->GetValue(NODAL_DAMAGE ) = 0.00; i->GetValue( SPLIT_NODAL ) = false; i->GetValue( NODAL_STRESS ) = Nodal_Values; i->GetValue( NODAL_STRAIN ) = Nodal_Values; } } } break; } case 3: { #pragma omp parallel for for ( int k = 0; k < number_of_threads; k++ ) { NodesArrayType::iterator i_begin = pNodes.ptr_begin() + node_partition[k]; NodesArrayType::iterator i_end = pNodes.ptr_begin() + node_partition[k+1]; for ( 
ModelPart::NodeIterator i = i_begin; i != i_end; ++i ) { { i->GetValue( NODAL_VOLUME) = 0.00; i->GetValue(NODAL_DAMAGE ) = 0.00; i->GetValue( SPLIT_NODAL ) = false; i->GetValue( NODAL_STRESS ) = Nodal_Values; i->GetValue( NODAL_STRAIN ) = Nodal_Values; } } } break; } } } void Solve( Matrix_Order_Tensor& Aux_b, Vector_Order_Tensor& Aux_Poly, Vector_Order_Tensor& Result ) { Matrix A; Matrix A_inv; Vector b; Vector a; a = ZeroVector( Aux_Poly[0].size() ); b = ZeroVector( Aux_Poly[0].size() ); A = ZeroMatrix( Aux_Poly[0].size(), Aux_Poly[0].size() ); A_inv = ZeroMatrix( Aux_Poly[0].size(), Aux_Poly[0].size() ); Result.resize( Aux_b[0].size2() ); for ( unsigned int j = 0; j < Aux_Poly.size(); j++ ) //Numeros de elementos { noalias( A ) = A + outer_prod( Aux_Poly[j], Aux_Poly[j] ); } int singular = SD_MathUtils<double>::InvertMatrix( A, A_inv ); if ( singular == 1 ) { KRATOS_WATCH( "MATRIX SINGULAR: MORE POINTS FOR EXTRAPOLATIONS" ) } for ( unsigned i = 0; i < Aux_b[0].size2(); i++ ) // Numeros de Sigma { Result[i].resize( Aux_Poly[0].size() ); for ( unsigned int j = 0; j < Aux_Poly.size(); j++ ) //Numeros de elementos { noalias( b ) = b + Aux_Poly[j] * Aux_b[j]( 0, i ); } noalias( a ) = prod( A_inv, b ); Result[i] = a; a = ZeroVector( Aux_Poly[0].size() ); b = ZeroVector( Aux_Poly[0].size() ); } } int InvertMatrix( const Matrix& input, Matrix& inverse ) { int singular = 0; using namespace boost::numeric::ublas; typedef permutation_matrix<std::size_t> pmatrix; Matrix A( input ); pmatrix pm( A.size1() ); singular = lu_factorize( A, pm ); inverse.assign( identity_matrix<double>( A.size1() ) ); lu_substitute( A, pm, inverse ); return singular; } void Compute_Interpolated_Sigma(array_1d<double, 3>& Coord_Point, Vector_Order_Tensor& a, const unsigned int degree, Matrix& Result ) { unsigned int size = a.size(); Vector Polynomial; Vector Aux; Result = ZeroMatrix( 1, size ); CalculatePolynomialInterpolation( Polynomial, Coord_Point, degree ); for ( unsigned int i = 0; i < size; 
i++ ) { Result( 0, i ) = inner_prod(Polynomial, a[i] ); } } // Calcula el area tributaria de los nodos que han sido creados void Recompute_Values_For_New_Mesh( ModelPart& this_model_part, const unsigned int domain_size ) { KRATOS_TRY ElementsArrayType& pElements = this_model_part.Elements(); #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif vector<unsigned int> element_partition; CreatePartition( number_of_threads, pElements.size(), element_partition ); switch(domain_size) { case 2: { double Area = 0.00; double fact = 1.00/3.00; #pragma omp parallel for private(Area) shared(fact) for ( int k = 0; k < number_of_threads; k++ ) { ElementsArrayType::iterator it_begin = pElements.ptr_begin() + element_partition[k]; ElementsArrayType::iterator it_end = pElements.ptr_begin() + element_partition[k+1]; for ( ElementsArrayType::iterator it = it_begin; it != it_end; ++it ) { Element::GeometryType& geom = it->GetGeometry(); // Nodos del elemento Area = geom.Area(); for ( unsigned int i = 0; i < geom.size(); i++ ) { geom[i].SetLock(); double& Nodal_Area = geom[i].GetValue(NODAL_AREA ); Nodal_Area = Nodal_Area + fact * Area; geom[i].GetValue(NODAL_AREA ) = Nodal_Area; geom[i].UnSetLock(); } } } break; } case 3: { double volume = 0.00; double fact = 0.25; #pragma omp parallel for private(volume) shared(fact) for ( int k = 0; k < number_of_threads; k++ ) { ElementsArrayType::iterator it_begin = pElements.ptr_begin() + element_partition[k]; ElementsArrayType::iterator it_end = pElements.ptr_begin() + element_partition[k+1]; for ( ElementsArrayType::iterator it = it_begin; it != it_end; ++it ) { Element::GeometryType& geom = it->GetGeometry(); // Nodos del elemento volume = geom.Volume(); for ( unsigned int i = 0; i < geom.size(); i++ ) { geom[i].SetLock(); double& Nodal_Volume = geom[i].GetValue( NODAL_VOLUME ); Nodal_Volume += fact * volume; geom[i].GetValue( NODAL_VOLUME ) = Nodal_Volume; geom[i].UnSetLock(); } } } break; } } 
KRATOS_CATCH( "" ) } void Finalize() { minitialize_Setting_Variables = false; } bool minitialize_Setting_Variables; polynomial_degree mpolynomial_degree; ModelPart& mr_model_part; unsigned int mdomain_size; }; } #endif
GB_unop__identity_int32_bool.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_int32_bool) // op(A') function: GB (_unop_tran__identity_int32_bool) // C type: int32_t // A type: bool // cast: int32_t cij = (int32_t) aij // unaryop: cij = aij #define GB_ATYPE \ bool #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int32_t z = (int32_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ bool aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int32_t z = (int32_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_int32_bool) ( int32_t *Cx, // Cx and Ax may be aliased const bool *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for 
num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { bool aij = Ax [p] ; int32_t z = (int32_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; bool aij = Ax [p] ; int32_t z = (int32_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int32_bool) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
tmcmc_stats.c
/* * tmcmc_stats.c * Pi4U * * Created by Panagiotis Hadjidoukas on 1/1/14. * Copyright 2014 ETH Zurich. All rights reserved. * */ #include <stdio.h> #include <math.h> #include "engine_tmcmc.h" #include <time.h> int display = 0; #define LARGE_SCALE_POPS /** OBJLOGP FUNCTION **/ double Objlogp(double x, double *fj, int fn, double pj, double tol) { int i; double fjmax = compute_max(fj, fn); #ifdef LARGE_SCALE_POPS double *weight = (double *)malloc(fn*sizeof(double)); #else double weight[fn]; #endif for (i = 0; i < fn; i++) weight[i] = exp((fj[i]-fjmax)*(x-pj)); double sum_weight = compute_sum(weight, fn); #ifdef LARGE_SCALE_POPS double *q = (double *)malloc(fn*sizeof(double)); #else double q[fn]; #endif for (i = 0; i < fn; i++) q[i] = weight[i]/sum_weight; double mean_q = compute_mean(q, fn); double std_q = compute_std(q, fn, mean_q); double CoefVar = pow(std_q/mean_q-tol, 2); /* result */ #ifdef LARGE_SCALE_POPS free(weight); free(q); #endif return CoefVar; } typedef struct fparam_s { double *fj; int fn; double pj; double tol; } fparam_t; fparam_t *sfp; double Objlogp_s(double *x, int n) { double *fj = sfp->fj; int fn = sfp->fn; double pj = sfp->pj; double tol = sfp->tol; return Objlogp(x[0], fj, fn, pj, tol); } double Objlogp_gsl(double x, void *param) { fparam_t *fp = (fparam_t *) param; double *fj = fp->fj; int fn = fp->fn; double pj = fp->pj; double tol = fp->tol; double res = Objlogp(x, fj, fn, pj, tol); /* printf("Objlogp(%lf)=%lf\n", x, res); */ return res; } double Objlogp_gsl2(const gsl_vector *v, void *param) { double x; x = gsl_vector_get(v, 0); return Objlogp_gsl(x, param); } /*** OPTIMIZATION ***/ int fzerofind(double *fj, int fn, double pj, double tol, double *xmin, double *fmin) { size_t iter = 0; /*size_t max_iter = data.options.MaxIter;*/ /* USER input - not used here */ double Tol = data.options.Tol; int Display = data.options.Display; double Step = data.options.Step; double x_lo = 0.0, x_hi = 4.0; int conv = 0; size_t niters; static int counter = 
-1; int first_try = 0; int dump = 0; FILE *fp = NULL; char fname[64]; counter++; retry: if (Display) printf("fminzero: x_lo = %e x_hi = %e Step = %e\n", x_lo, x_hi, Step); niters = (unsigned long) ((x_hi-x_lo) / Step); first_try++; if (first_try) dump=1; if (dump) { sprintf(fname, "fzero_%03d.txt", counter); fp = fopen(fname, "w"); } double m = 0; double fm = DBL_MAX; double t0 = torc_gettime(); int found = 0; #if !defined(_OPENMP) for (iter = 0; iter < niters; iter++) { double x = x_lo + iter*Step; double fx = Objlogp(x, fj, fn, pj, tol); if (dump) fprintf(fp, "%.16f %.16f\n", x, fx); if (fx < fm) { fm = fx; m = x; } if (fabs(fx) <= Tol) { found = 1; break; } } #else #pragma omp parallel { double lm = 0; double lfm = DBL_MAX; #pragma omp for for (iter = 0; iter < niters; iter++) { double x, fx; if (found == 0) { x = x_lo + iter*Step; fx = Objlogp(x, fj, fn, pj, tol); if (fx < lfm) { lfm = fx; lm = x; } if (fabs(fx) <= Tol) { found = 1; #pragma omp flush(found) } } /* task cancellation ? 
*/ } #pragma omp critical { if (lfm < fm) { fm = lfm; m = lm; } } } #endif double t1 = torc_gettime(); if (found) conv = 1; /* If fm is not within Tolerance, we can go back and retry with better refinement (more iterations) */ if (!found) { x_lo = m - 10*Step; if (x_lo < 0) x_lo = 0; x_hi = m + 10*Step; if (x_hi > 4) x_hi = 4; Step = 0.1*Step; if (Step < 1e-16) { return 1; } else { if (Display) printf("fzerofind (%e): m=%.16f fm=%.16f iter=%ld, time=%lf s\n", Step, m, fm, niters, t1-t0); goto retry; } } if (Display) printf("fzerofind: m=%.16f fm=%.16f iter=%ld, time=%lf s\n", m, fm, niters, t1-t0); *xmin = m; *fmin = fm; if (dump) fclose(fp); return (conv = 1); } int fzerofind1(double *fj, int fn, double pj, double tol, double *xmin, double *fmin) { size_t iter = 0; /*size_t max_iter = data.options.MaxIter;*/ /* USER input - not used here */ double Tol = data.options.Tol; int Display = data.options.Display; double Step = data.options.Step; double x_lo = 0, x_hi = 4.0; int conv = 0; size_t niters; retry: if (Display) printf("fminzero: Step = %e\n", Step); niters = (unsigned long) ((x_hi-x_lo) / Step); double m = 0; double fm = DBL_MAX; double t0 = torc_gettime(); int found = 0; #if !defined(_OPENMP) for (iter = 0; iter < niters; iter++) { double x = x_lo + iter*Step; double fx = Objlogp(x, fj, fn, pj, tol); if (fx < fm) { fm = fx; m = x; } if (fabs(fx) <= Tol) { found = 1; break; } } #else #pragma omp parallel { double lm = 0; double lfm = DBL_MAX; #pragma omp for for (iter = 0; iter < niters; iter++) { double x, fx; if (found == 0) { x = x_lo + iter*Step; fx = Objlogp(x, fj, fn, pj, tol); if (fx < lfm) { lfm = fx; lm = x; } if (fabs(fx) <= Tol) { found = 1; #pragma omp flush(found) } } /* task cancellation ? 
*/ } #pragma omp critical { if (lfm < fm) { fm = lfm; m = lm; } } } #endif double t1 = torc_gettime(); if (found) conv = 1; /* If fm is not within Tolerance, we can go back and retry with better refinement (more iterations) */ if (!found) { Step = 0.1*Step; if (Step < 1e-6) { return 1; } else { if (Display) printf("fzerofind (%e): m=%.16f fm=%.16f iter=%ld, time=%lf s\n", Step, m, fm, niters, t1-t0); goto retry; } } if (Display) printf("fzerofind: m=%.16f fm=%.16f iter=%ld, time=%lf s\n", m, fm, niters, t1-t0); *xmin = m; *fmin = fm; return (conv = 1); } int fminsearch(double *fj, int fn, double pj, double tol, double *xmin, double *fmin) { const gsl_multimin_fminimizer_type *T; gsl_multimin_fminimizer *s = NULL; gsl_vector *ss, *x; gsl_multimin_function minex_func; int conv = 0; size_t iter = 0, max_iter = data.options.MaxIter; /* USER input*/ double Tol = data.options.Tol; int Display = data.options.Display; double Step = data.options.Step; int status; double size; fparam_t fp; fp.fj = fj; fp.fn = fn; fp.pj = pj; fp.tol = tol; /* Starting point */ x = gsl_vector_alloc (1); gsl_vector_set (x, 0, pj); /* Set initial step sizes to Step */ ss = gsl_vector_alloc (1); gsl_vector_set_all (ss, Step); /* input */ /* Initialize method and iterate */ minex_func.n = 1; minex_func.f = Objlogp_gsl2; minex_func.params = &fp; /* T = gsl_multimin_fminimizer_nmsimplex;*/ T = gsl_multimin_fminimizer_nmsimplex2; /* T = gsl_multimin_fminimizer_nmsimplex2rand;*/ s = gsl_multimin_fminimizer_alloc (T, 1); gsl_multimin_fminimizer_set (s, &minex_func, x, ss); if (Display) { printf ("using %s method\n", gsl_multimin_fminimizer_name (s)); } do { iter++; status = gsl_multimin_fminimizer_iterate(s); if (status) break; size = gsl_multimin_fminimizer_size (s); status = gsl_multimin_test_size (size, Tol); if (status == GSL_SUCCESS) { conv = 1; if (Display) printf ("converged to minimum at\n"); } #if 1 else if (fabs(s->fval) <= Tol) { conv = 1; status = GSL_SUCCESS; if (Display) printf ("found 
minimum at\n"); } #endif if (Display) printf ("%3ld x = %.16lf f() = %.16f size = %.16f\n", iter, gsl_vector_get (s->x, 0), s->fval, size); } while (status == GSL_CONTINUE && iter < max_iter); #if 1 /* double-check */ if ((conv == 1) && (fabs(s->fval) > Tol)) { conv = 0; if (Display) printf ("fminsearch: converged but not found minimum.\n"); } #endif if (conv) { conv = 1; *fmin = s->fval; *xmin = gsl_vector_get(s->x, 0); } else { *fmin = 0; *xmin = 0.0; } gsl_vector_free(x); gsl_vector_free(ss); gsl_multimin_fminimizer_free (s); return conv; } int fmincon(double *fj, int fn, double pj, double tol, double *xmin, double *fmin) { int status; int iter = 0, max_iter = data.options.MaxIter; /* USER input*/ double Tol = data.options.Tol; int Display = data.options.Display; const gsl_min_fminimizer_type *T; gsl_min_fminimizer *s; double x_lo = 0.0, x_hi = 4.0; /* input */ double m = 0.5, fm = 0.0; gsl_function F; int conv = 0; gsl_vector *x; int i; x = gsl_vector_alloc (1); fparam_t fp; fp.fj = fj; fp.fn = fn; fp.pj = pj; fp.tol = tol; F.function = Objlogp_gsl; F.params = &fp; T = gsl_min_fminimizer_brent; /* T = gsl_min_fminimizer_goldensection;*/ /* T = gsl_min_fminimizer_quad_golden;*/ s = gsl_min_fminimizer_alloc (T); double f_lo = Objlogp_gsl(x_lo, &fp); double f_hi = Objlogp_gsl(x_hi, &fp); if (f_lo < f_hi) { m = x_lo; fm = f_lo; } else { m = x_hi; fm = f_hi; } for (i = 0; i < max_iter; i++) { double x = x_lo + i*(x_hi-x_lo)/max_iter; double fx = Objlogp_gsl(x, &fp); if (fx < fm) { m = x; fm = fx; } } #if 1 if (fabs(fm) <= Tol) { conv = 1; gsl_vector_free(x); gsl_min_fminimizer_free (s); if (Display) printf("fmincon: early return with m = %.16f fm = %.16f\n", m, fm); return conv; } #endif if ((fm < f_lo) && (fm < f_hi)) { if (Display) printf("fmincon: initialized with %d tries and m = %f (fm = %f)\n", i, m, fm); } else { if (Display) printf("failed to initialize fmincon (%.16f, %.16f)!\n", f_lo, f_hi); return 0; } gsl_min_fminimizer_set (s, &F, m, x_lo, x_hi); if 
(Display) { printf ("using %s method\n", gsl_min_fminimizer_name (s)); printf ("%5s [%18s, %18s] %18s %18s %18s\n", "iter", "lower", "upper", "min", "fmin", "err(est)"); printf ("%5d [%.16f, %.16f] %.16f %.16f %.16f\n", iter, x_lo, x_hi, m, fm, x_hi - x_lo); } do { iter++; status = gsl_min_fminimizer_iterate (s); m = gsl_min_fminimizer_x_minimum (s); x_lo = gsl_min_fminimizer_x_lower (s); x_hi = gsl_min_fminimizer_x_upper (s); status = gsl_min_test_interval (x_lo, x_hi, Tol, Tol); if (status == GSL_SUCCESS) { if (Display) printf ("Converged:\n"); conv = 1; } #if 1 else if (fabs(gsl_min_fminimizer_f_minimum(s)) <= Tol) { conv = 1; status = GSL_SUCCESS; if (Display) printf ("found minimum at\n"); } #endif if (Display) printf ("%5d [%.16f, %.16f] %.16f f()=%.16f %.16f\n", iter, x_lo, x_hi, m, gsl_min_fminimizer_f_minimum(s), x_hi - x_lo); } while (status == GSL_CONTINUE && iter < max_iter); #if 1 /* double-check */ if ((conv == 1) && (fabs(gsl_min_fminimizer_f_minimum(s)) > Tol)) { conv = 0; if (Display) printf ("converged but not found minimum.\n"); } #endif if (conv) { conv = 1; gsl_vector_set (x, 0, m); *fmin = Objlogp_gsl(m, &fp); *xmin = m; } else { *fmin = 0; *xmin = 0.0; } gsl_vector_free(x); gsl_min_fminimizer_free (s); return conv; } /*** STATISTICS ***/ #include "posdef.c" /* peh */ void calculate_statistics(double flc[], int n, int nselections, int gen, unsigned int sel[]) { int Display = data.options.Display; /*double pflag = 0;*/ double tolCOV = data.TolCOV; double *CoefVar = runinfo.CoefVar; double *p = runinfo.p; int *Num = data.Num; /* int *currentuniques = runinfo.currentuniques;*/ double *logselection = runinfo.logselection; double Step = data.options.Step; double fmin = 0, xmin = 0; int conv = 0; #if 1 #if 0 conv = fmincon(flc, n, p[gen], tolCOV, &xmin, &fmin); if (Display) printf("fmincon: conv=%d xmin=%.16lf fmin=%.16lf\n", conv, xmin, fmin); #endif #if 1 if (!conv) { conv = fminsearch(flc, n, p[gen], tolCOV, &xmin, &fmin); if (Display) 
printf("fminsearch: conv=%d xmin=%.16lf fmin=%.16lf\n", conv, xmin, fmin); } #endif #if 1 if (!conv) { conv = fzerofind(flc, n, p[gen], tolCOV, &xmin, &fmin); if (Display) printf("fzerofind: conv=%d xmin=%.16lf fmin=%.16lf\n", conv, xmin, fmin); } #endif #else /* testing */ if (Display) printf("\n"); double t0, t1; t0 = torc_gettime(); int conv_con = fmincon(flc, n, p[gen], tolCOV, &xmin, &fmin); t1 = torc_gettime(); printf("statopt - fmincon: conv=%d xmin=%.16lf fmin=%.16lf in %f s\n", conv_con, xmin, fmin, t1-t0); t0 = torc_gettime(); int conv_search = fminsearch(flc, n, p[gen], tolCOV, &xmin, &fmin); t1 = torc_gettime(); printf("statopt - fminsearch: conv=%d xmin=%.16lf fmin=%.16lf in %f s\n", conv_search, xmin, fmin, t1-t0); t0 = torc_gettime(); int conv_zero = fzerofind(flc, n, p[gen], tolCOV, &xmin, &fmin); t1 = torc_gettime(); printf("statopt - fzerofind: conv=%d xmin=%.16lf fmin=%.16lf in %f s\n", conv_zero, xmin, fmin, t1-t0); /* conv = conv_search; */ conv = conv_zero; #endif /* gen: next generation number */ int j = gen+1; if ((conv)&&(xmin > p[gen])) { p[j] = xmin; CoefVar[j] = fmin; } else { p[j] = p[gen] + 0.1*Step; CoefVar[j] = CoefVar[gen]; } if (p[j] > 1) { /*pflag=p[j-1];*/ p[j] = 1; Num[j]=data.LastNum; } /*print_matrix("p", p, j);*/ /* if (p[j] > 10) { data.Rsq = data.Rsqstrict; } */ /* Compute weights and normalize*/ int i; #if 1 double *flcp = (double *)malloc(n*sizeof(double)); for (i= 0; i<n; i++) flcp[i] = flc[i]*(p[j]-p[j-1]); double fjmax= compute_max (flcp,n ); double *weight = (double *)malloc(n*sizeof(double)); /*PA weight[i] = exp((flc[i]-fjmax)*(p[j]-p[j-1])); 23/06 */ for (i = 0; i < n; i++) weight[i] = exp( flcp[i] - fjmax ); if (display) print_matrix((char *)"weight", weight, n); double sum_weight = compute_sum(weight, n); double *q = (double *)malloc(n*sizeof(double)); for (i = 0; i < n; i++) q[i] = weight[i]/sum_weight; if (display) print_matrix((char *)"runinfo_q", q, n); /*double sum_q = compute_sum(q, n);*/ 
/*logselection[gen] = log(sum_weight/currentuniques[gen])+fjmax*(p[gen+1]-p[gen]); PA definition change for all types of resampling 23/06/15*/ logselection[gen]= log(sum_weight) + fjmax -log(n); #else double fjmax = compute_max(flc, n); double weight[n]; for (i = 0; i < n; i++) weight[i] = exp((flc[i]-fjmax)*(p[j]-p[j-1])); if (display) print_matrix((char *)"weight", weight, n); double sum_weight = compute_sum(weight, n); double q[n]; for (i = 0; i < n; i++) q[i] = weight[i]/sum_weight; if (display) print_matrix((char *)"runinfo_q", q, n); /*double sum_q = compute_sum(q, n);*/ logselection[gen] = log(sum_weight/currentuniques[gen])+fjmax*(p[gen+1]-p[gen]); #endif if (display) print_matrix((char *)"logselection", logselection, gen+1); double mean_q = compute_mean(q, n); double std_q = compute_std(q, n, mean_q); CoefVar[gen] = std_q/mean_q; if (display) print_matrix((char *)"CoefVar", CoefVar, gen+1); size_t K = n; unsigned int N = 1; unsigned int samples = n; /*1000;*/ unsigned int *nn = (unsigned int *)malloc(samples*sizeof(unsigned int)); for (i = 0; i < samples; i++) sel[i] = 0; if (nselections == 0) nselections = samples; /* n;*/ #if 0 int k; for (k = 0; k < nselections; k++) { /*gsl_ran_multinomial (r, K, N, q, nn);*/ multinomialrand (K, N, q, nn); for (i = 0; i < K; i++) sel[i]+=nn[i]; } #else N = nselections; /*gsl_ran_multinomial (r, K, N, q, nn);*/ multinomialrand (K, N, q, nn); for (i = 0; i < K; i++) sel[i]+=nn[i]; #endif if (display) { printf("\n s = ["); for (i = 0; i < K; i++) printf("%d ", sel[i]); printf("]\n"); } /* compute SS */ int PROBDIM = data.Nth; double mean_of_theta[PROBDIM]; for (i = 0; i < PROBDIM; i++) { mean_of_theta[i] = 0; for (j = 0; j < n; j++) mean_of_theta[i]+=curgen_db.entry[j].point[i]*q[j]; runinfo.meantheta[gen][i] = mean_of_theta[i]; } if (display) print_matrix((char *)"mean_of_theta", mean_of_theta, PROBDIM); double meanv[PROBDIM]; for (i = 0; i < PROBDIM; i++) { meanv[i] = mean_of_theta[i]; } for (i = 0; i < PROBDIM; i++) { 
for (j = 0; j < PROBDIM; j++) { double s; int k; s = 0; for (k = 0; k < n; k++) { s += q[k]*(curgen_db.entry[k].point[i]-meanv[i])*(curgen_db.entry[k].point[j]-meanv[j]); } runinfo.SS[i][j] = runinfo.SS[j][i] = s; } } #if 1 /* peh:check this */ { int fixed = make_posdef(runinfo.SS[0], PROBDIM, 2); if (fixed) { printf("WARNING: runinfo.SS was forced to become positive definite\n"); } } #endif if (display) print_matrix_2d((char *)"runinfo.SS", runinfo.SS, PROBDIM, PROBDIM); free(flcp); free(weight); free(q); free(nn); } double logpriorpdf(double *theta, int n) { /* peh:check this */ double res = 0; int i; switch(data.prior_type) { case 0: /* uniform */ res = 0; for (i = 0; i < n; i++) { /* here we only get samples which are guaranteed to be inside the prior region */ res += -log(data.upperbound[i] - data.lowerbound[i]); } break; case 1: /* gaussian */ res = logmvnpdf(n, theta, data.prior_mu, data.prior_sigma); break; case 2: /* file - nothing to do*/ break; case 3: /* composite */ res = 0; for (i = 0; i < n; i++) { /* uniform */ if(data.compositeprior_distr[i] == 0) { /* here we only get samples which are guaranteed to be inside the prior region */ res += -log(data.upperbound[i] - data.lowerbound[i]); } /* normal */ else if(data.compositeprior_distr[i] == 1) { res += -0.5*( log( 2.0*M_PI*pow(data.prior_sigma[i],2) ) + pow( theta[i]-data.prior_mu[i]/data.prior_sigma[i], 2 ) ); } } break; } return res; }
profile.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP RRRR OOO FFFFF IIIII L EEEEE % % P P R R O O F I L E % % PPPP RRRR O O FFF I L EEE % % P R R O O F I L E % % P R R OOO F IIIII LLLLL EEEEE % % % % % % MagickCore Image Profile Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/cache.h" #include "magick/color.h" #include "magick/colorspace-private.h" #include "magick/configure.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/hashmap.h" #include "magick/image.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/option-private.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" #if defined(MAGICKCORE_LCMS_DELEGATE) #if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H) #include <wchar.h> #include <lcms/lcms2.h> #else #include <wchar.h> #include "lcms2.h" #endif #endif /* Definitions */ #define LCMSHDRI #if !defined(MAGICKCORE_HDRI_SUPPORT) #if (MAGICKCORE_QUANTUM_DEPTH == 8) #undef LCMSHDRI #define LCMSScaleSource(pixel) ScaleQuantumToShort(pixel) #define LCMSScaleTarget(pixel) ScaleShortToQuantum(pixel) typedef unsigned short LCMSType; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) #undef LCMSHDRI #define LCMSScaleSource(pixel) (pixel) #define LCMSScaleTarget(pixel) (pixel) typedef unsigned short LCMSType; #endif #endif #if defined(LCMSHDRI) #define LCMSScaleSource(pixel) (source_scale*QuantumScale*(pixel)) #define LCMSScaleTarget(pixel) ClampToQuantum(target_scale*QuantumRange*(pixel)) typedef double LCMSType; #endif /* Forward declarations */ static MagickBooleanType SetImageProfileInternal(Image *,const char *,const StringInfo *, const MagickBooleanType); static void WriteTo8BimProfile(Image *,const char*,const StringInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e P r o f i l e s % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageProfiles() clones one or more image profiles. % % The format of the CloneImageProfiles method is: % % MagickBooleanType CloneImageProfiles(Image *image, % const Image *clone_image) % % A description of each parameter follows: % % o image: the image. % % o clone_image: the clone image. % */ MagickExport MagickBooleanType CloneImageProfiles(Image *image, const Image *clone_image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(clone_image != (const Image *) NULL); assert(clone_image->signature == MagickCoreSignature); image->color_profile.length=clone_image->color_profile.length; image->color_profile.info=clone_image->color_profile.info; image->iptc_profile.length=clone_image->iptc_profile.length; image->iptc_profile.info=clone_image->iptc_profile.info; if (clone_image->profiles != (void *) NULL) { if (image->profiles != (void *) NULL) DestroyImageProfiles(image); image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles, (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e l e t e I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DeleteImageProfile() deletes a profile from the image by its name. % % The format of the DeleteImageProfile method is: % % MagickBooleanTyupe DeleteImageProfile(Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. 
% */ MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return(MagickFalse); if (LocaleCompare(name,"icc") == 0) { /* Continue to support deprecated color profile for now. */ image->color_profile.length=0; image->color_profile.info=(unsigned char *) NULL; } if (LocaleCompare(name,"iptc") == 0) { /* Continue to support deprecated IPTC profile for now. */ image->iptc_profile.length=0; image->iptc_profile.info=(unsigned char *) NULL; } WriteTo8BimProfile(image,name,(StringInfo *) NULL); return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImageProfiles() releases memory associated with an image profile map. % % The format of the DestroyProfiles method is: % % void DestroyImageProfiles(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DestroyImageProfiles(Image *image) { if (image->profiles != (SplayTreeInfo *) NULL) image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageProfile() gets a profile associated with an image by name. % % The format of the GetImageProfile method is: % % const StringInfo *GetImageProfile(const Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. 
% */ MagickExport const StringInfo *GetImageProfile(const Image *image, const char *name) { const StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((StringInfo *) NULL); profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *) image->profiles,name); return(profile); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t N e x t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNextImageProfile() gets the next profile name for an image. % % The format of the GetNextImageProfile method is: % % char *GetNextImageProfile(const Image *image) % % A description of each parameter follows: % % o hash_info: the hash info. % */ MagickExport char *GetNextImageProfile(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((char *) NULL); return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P r o f i l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ProfileImage() associates, applies, or removes an ICM, IPTC, or generic % profile with / to / from an image. If the profile is NULL, it is removed % from the image otherwise added or applied. Use a name of '*' and a profile % of NULL to remove all profiles from the image. 
% % ICC and ICM profiles are handled as follows: If the image does not have % an associated color profile, the one you provide is associated with the % image and the image pixels are not transformed. Otherwise, the colorspace % transform defined by the existing and new profile are applied to the image % pixels and the new profile is associated with the image. % % The format of the ProfileImage method is: % % MagickBooleanType ProfileImage(Image *image,const char *name, % const void *datum,const size_t length,const MagickBooleanType clone) % % A description of each parameter follows: % % o image: the image. % % o name: Name of profile to add or remove: ICC, IPTC, or generic profile. % % o datum: the profile data. % % o length: the length of the profile. % % o clone: should be MagickFalse. % */ #if defined(MAGICKCORE_LCMS_DELEGATE) static LCMSType **DestroyPixelThreadSet(LCMSType **pixels) { register ssize_t i; if (pixels == (LCMSType **) NULL) return((LCMSType **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (LCMSType *) NULL) pixels[i]=(LCMSType *) RelinquishMagickMemory(pixels[i]); pixels=(LCMSType **) RelinquishMagickMemory(pixels); return(pixels); } static LCMSType **AcquirePixelThreadSet(const size_t columns, const size_t channels) { LCMSType **pixels; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(LCMSType **) AcquireQuantumMemory(number_threads,sizeof(*pixels)); if (pixels == (LCMSType **) NULL) return((LCMSType **) NULL); (void) memset(pixels,0,number_threads*sizeof(*pixels)); for (i=0; i < (ssize_t) number_threads; i++) { pixels[i]=(LCMSType *) AcquireQuantumMemory(columns,channels* sizeof(**pixels)); if (pixels[i] == (LCMSType *) NULL) return(DestroyPixelThreadSet(pixels)); } return(pixels); } static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform) { register ssize_t i; assert(transform != (cmsHTRANSFORM *) NULL); for (i=0; 
i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (transform[i] != (cmsHTRANSFORM) NULL) cmsDeleteTransform(transform[i]); transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform); return(transform); } static cmsHTRANSFORM *AcquireTransformThreadSet(Image *image, const cmsHPROFILE source_profile,const cmsUInt32Number source_type, const cmsHPROFILE target_profile,const cmsUInt32Number target_type, const int intent,const cmsUInt32Number flags) { cmsHTRANSFORM *transform; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads, sizeof(*transform)); if (transform == (cmsHTRANSFORM *) NULL) return((cmsHTRANSFORM *) NULL); (void) memset(transform,0,number_threads*sizeof(*transform)); for (i=0; i < (ssize_t) number_threads; i++) { transform[i]=cmsCreateTransformTHR((cmsContext) image,source_profile, source_type,target_profile,target_type,intent,flags); if (transform[i] == (cmsHTRANSFORM) NULL) return(DestroyTransformThreadSet(transform)); } return(transform); } #endif #if defined(MAGICKCORE_LCMS_DELEGATE) static void LCMSExceptionHandler(cmsContext context,cmsUInt32Number severity, const char *message) { Image *image; (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s", severity,message != (char *) NULL ? 
message : "no message"); image=(Image *) context; if (image != (Image *) NULL) (void) ThrowMagickException(&image->exception,GetMagickModule(), ImageWarning,"UnableToTransformColorspace","`%s'",image->filename); } #endif static MagickBooleanType SetsRGBImageProfile(Image *image) { static unsigned char sRGBProfile[] = { 0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00, 0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20, 0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a, 0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99, 0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67, 0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70, 0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88, 0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c, 0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24, 0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24, 0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14, 0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14, 0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14, 0x67, 0x58, 0x59, 
0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14, 0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14, 0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d, 0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57, 0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65, 0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e, 0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 
0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00, 0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c, 0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2, 0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d, 0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0, 0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87, 0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19, 0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37, 0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54, 0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72, 0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90, 0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae, 0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb, 0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb, 0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d, 0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 
0x01, 0x25, 0x01, 0x2b, 0x01, 0x32, 0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59, 0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83, 0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1, 0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1, 0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14, 0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b, 0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84, 0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1, 0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00, 0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43, 0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a, 0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3, 0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20, 0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71, 0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4, 0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c, 0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77, 0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5, 0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37, 0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d, 0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07, 0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74, 0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5, 0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a, 0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2, 0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f, 0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf, 0x09, 0xe5, 0x09, 
0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54, 0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc, 0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69, 0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9, 0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e, 0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26, 0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3, 0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64, 0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09, 0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3, 0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61, 0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13, 0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9, 0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84, 0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43, 0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06, 0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce, 0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b, 0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c, 0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41, 0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b, 0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa, 0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd, 0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5, 0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2, 0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3, 0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99, 0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94, 
0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94, 0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98, 0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1, 0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf, 0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2, 0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda, 0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7, 0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18, 0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f, 0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b, 0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b, 0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1, 0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c, 0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c, 0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91, 0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb, 0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a, 0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f, 0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8, 0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37, 0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c, 0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05, 0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74, 0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8, 0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61, 0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0, 0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64, 0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 
0xac, 0x41, 0xee, 0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d, 0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12, 0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab, 0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b, 0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0, 0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a, 0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a, 0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00, 0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb, 0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c, 0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42, 0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f, 0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0, 0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8, 0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95, 0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78, 0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61, 0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f, 0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43, 0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d, 0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d, 0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43, 0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f, 0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60, 0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78, 0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95, 0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8, 0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 
0x75, 0x28, 0x75, 0x85, 0x75, 0xe1, 0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11, 0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46, 0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81, 0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2, 0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a, 0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57, 0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab, 0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04, 0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64, 0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca, 0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36, 0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8, 0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20, 0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f, 0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24, 0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf, 0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40, 0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8, 0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76, 0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a, 0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4, 0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75, 0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d, 0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea, 0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae, 0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79, 0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a, 0xb9, 0xc2, 0xba, 
0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21, 0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff, 0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3, 0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce, 0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf, 0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7, 0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5, 0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba, 0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6, 0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8, 0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1, 0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10, 0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36, 0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63, 0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96, 0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0, 0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11, 0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58, 0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7, 0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb, 0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57, 0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba, 0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff }; StringInfo *profile; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (GetImageProfile(image,"icc") != (const StringInfo *) NULL) return(MagickFalse); profile=AcquireStringInfo(sizeof(sRGBProfile)); SetStringInfoDatum(profile,sRGBProfile); status=SetImageProfile(image,"icc",profile); profile=DestroyStringInfo(profile); 
return(status); } MagickExport MagickBooleanType ProfileImage(Image *image,const char *name, const void *datum,const size_t length, const MagickBooleanType magick_unused(clone)) { #define ProfileImageTag "Profile/Image" #define ThrowProfileException(severity,tag,context) \ { \ if (source_profile != (cmsHPROFILE) NULL) \ (void) cmsCloseProfile(source_profile); \ if (target_profile != (cmsHPROFILE) NULL) \ (void) cmsCloseProfile(target_profile); \ ThrowBinaryException(severity,tag,context); \ } MagickBooleanType status; StringInfo *profile; magick_unreferenced(clone); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(name != (const char *) NULL); if ((datum == (const void *) NULL) || (length == 0)) { char *next; /* Delete image profile(s). */ ResetImageProfileIterator(image); for (next=GetNextImageProfile(image); next != (const char *) NULL; ) { if (IsOptionMember(next,name) != MagickFalse) { (void) DeleteImageProfile(image,next); ResetImageProfileIterator(image); } next=GetNextImageProfile(image); } return(MagickTrue); } /* Add a ICC, IPTC, or generic profile to the image. */ status=MagickTrue; profile=AcquireStringInfo((size_t) length); SetStringInfoDatum(profile,(unsigned char *) datum); if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0)) status=SetImageProfile(image,name,profile); else { const StringInfo *icc_profile; icc_profile=GetImageProfile(image,"icc"); if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { const char *value; value=GetImageProperty(image,"exif:ColorSpace"); (void) value; if (LocaleCompare(value,"1") != 0) (void) SetsRGBImageProfile(image); value=GetImageProperty(image,"exif:InteroperabilityIndex"); if (LocaleCompare(value,"R98.") != 0) (void) SetsRGBImageProfile(image); /* Future. 
value=GetImageProperty(image,"exif:InteroperabilityIndex"); if (LocaleCompare(value,"R03.") != 0) (void) SetAdobeRGB1998ImageProfile(image); */ icc_profile=GetImageProfile(image,"icc"); } if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { profile=DestroyStringInfo(profile); return(MagickTrue); } #if !defined(MAGICKCORE_LCMS_DELEGATE) (void) ThrowMagickException(&image->exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (LCMS)", image->filename); #else { cmsHPROFILE source_profile; /* Transform pixel colors as defined by the color profiles. */ cmsSetLogErrorHandler(LCMSExceptionHandler); source_profile=cmsOpenProfileFromMemTHR((cmsContext) image, GetStringInfoDatum(profile),(cmsUInt32Number) GetStringInfoLength(profile)); if (source_profile == (cmsHPROFILE) NULL) ThrowBinaryImageException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); if ((cmsGetDeviceClass(source_profile) != cmsSigLinkClass) && (icc_profile == (StringInfo *) NULL)) status=SetImageProfile(image,name,profile); else { CacheView *image_view; ColorspaceType source_colorspace, target_colorspace; cmsColorSpaceSignature signature; cmsHPROFILE target_profile; cmsHTRANSFORM *magick_restrict transform; cmsUInt32Number flags, source_type, target_type; ExceptionInfo *exception; int intent; LCMSType **magick_restrict source_pixels, **magick_restrict target_pixels; #if defined(LCMSHDRI) LCMSType source_scale, target_scale; #endif MagickOffsetType progress; size_t source_channels, target_channels; ssize_t y; exception=(&image->exception); target_profile=(cmsHPROFILE) NULL; if (icc_profile != (StringInfo *) NULL) { target_profile=source_profile; source_profile=cmsOpenProfileFromMemTHR((cmsContext) image, GetStringInfoDatum(icc_profile),(cmsUInt32Number) GetStringInfoLength(icc_profile)); if (source_profile == (cmsHPROFILE) NULL) ThrowProfileException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } 
#if defined(LCMSHDRI) source_scale=1.0; #endif source_channels=3; switch (cmsGetColorSpace(source_profile)) { case cmsSigCmykData: { source_colorspace=CMYKColorspace; source_channels=4; #if defined(LCMSHDRI) source_type=(cmsUInt32Number) TYPE_CMYK_DBL; source_scale=100.0; #else source_type=(cmsUInt32Number) TYPE_CMYK_16; #endif break; } case cmsSigGrayData: { source_colorspace=GRAYColorspace; source_channels=1; #if defined(LCMSHDRI) source_type=(cmsUInt32Number) TYPE_GRAY_DBL; #else source_type=(cmsUInt32Number) TYPE_GRAY_16; #endif break; } case cmsSigLabData: { source_colorspace=LabColorspace; #if defined(LCMSHDRI) source_type=(cmsUInt32Number) TYPE_Lab_DBL; source_scale=100.0; #else source_type=(cmsUInt32Number) TYPE_Lab_16; #endif break; } #if !defined(LCMSHDRI) case cmsSigLuvData: { source_colorspace=YUVColorspace; source_type=(cmsUInt32Number) TYPE_YUV_16; break; } #endif case cmsSigRgbData: { #if defined(LCMSHDRI) source_colorspace=sRGBColorspace; source_type=(cmsUInt32Number) TYPE_RGB_DBL; #else source_type=(cmsUInt32Number) TYPE_RGB_16; #endif break; } case cmsSigXYZData: { source_colorspace=XYZColorspace; #if defined(LCMSHDRI) source_type=(cmsUInt32Number) TYPE_XYZ_DBL; #else source_type=(cmsUInt32Number) TYPE_XYZ_16; #endif break; } #if !defined(LCMSHDRI) case cmsSigYCbCrData: { source_colorspace=YUVColorspace; source_type=(cmsUInt32Number) TYPE_YCbCr_16; break; } #endif default: ThrowProfileException(ImageError, "ColorspaceColorProfileMismatch",name); } (void) source_colorspace; signature=cmsGetPCS(source_profile); if (target_profile != (cmsHPROFILE) NULL) signature=cmsGetColorSpace(target_profile); #if defined(LCMSHDRI) target_scale=1.0; #endif target_channels=3; switch (signature) { case cmsSigCmykData: { target_colorspace=CMYKColorspace; target_channels=4; #if defined(LCMSHDRI) target_type=(cmsUInt32Number) TYPE_CMYK_DBL; target_scale=0.01; #else target_type=(cmsUInt32Number) TYPE_CMYK_16; #endif break; } case cmsSigGrayData: { 
target_colorspace=GRAYColorspace; target_channels=1; #if defined(LCMSHDRI) target_type=(cmsUInt32Number) TYPE_GRAY_DBL; #else target_type=(cmsUInt32Number) TYPE_GRAY_16; #endif break; } case cmsSigLabData: { target_colorspace=LabColorspace; #if defined(LCMSHDRI) target_type=(cmsUInt32Number) TYPE_Lab_DBL; target_scale=0.01; #else target_type=(cmsUInt32Number) TYPE_Lab_16; #endif break; } #if !defined(LCMSHDRI) case cmsSigLuvData: { target_colorspace=YUVColorspace; target_type=(cmsUInt32Number) TYPE_YUV_16; break; } #endif case cmsSigRgbData: { target_colorspace=sRGBColorspace; #if defined(LCMSHDRI) target_type=(cmsUInt32Number) TYPE_RGB_DBL; #else target_type=(cmsUInt32Number) TYPE_RGB_16; #endif break; } case cmsSigXYZData: { target_colorspace=XYZColorspace; #if defined(LCMSHDRI) target_type=(cmsUInt32Number) TYPE_XYZ_DBL; #else target_type=(cmsUInt32Number) TYPE_XYZ_16; #endif break; } default: ThrowProfileException(ImageError, "ColorspaceColorProfileMismatch",name); } switch (image->rendering_intent) { case AbsoluteIntent: intent=INTENT_ABSOLUTE_COLORIMETRIC; break; case PerceptualIntent: intent=INTENT_PERCEPTUAL; break; case RelativeIntent: intent=INTENT_RELATIVE_COLORIMETRIC; break; case SaturationIntent: intent=INTENT_SATURATION; break; default: intent=INTENT_PERCEPTUAL; break; } flags=cmsFLAGS_HIGHRESPRECALC; #if defined(cmsFLAGS_BLACKPOINTCOMPENSATION) if (image->black_point_compensation != MagickFalse) flags|=cmsFLAGS_BLACKPOINTCOMPENSATION; #endif transform=AcquireTransformThreadSet(image,source_profile, source_type,target_profile,target_type,intent,flags); if (transform == (cmsHTRANSFORM *) NULL) ThrowProfileException(ImageError,"UnableToCreateColorTransform", name); /* Transform image as dictated by the source & target image profiles. 
*/ source_pixels=AcquirePixelThreadSet(image->columns,source_channels); target_pixels=AcquirePixelThreadSet(image->columns,target_channels); if ((source_pixels == (LCMSType **) NULL) || (target_pixels == (LCMSType **) NULL)) { target_pixels=DestroyPixelThreadSet(target_pixels); source_pixels=DestroyPixelThreadSet(source_pixels); transform=DestroyTransformThreadSet(transform); ThrowProfileException(ResourceLimitError, "MemoryAllocationFailed",image->filename); } if (SetImageStorageClass(image,DirectClass) == MagickFalse) { target_pixels=DestroyPixelThreadSet(target_pixels); source_pixels=DestroyPixelThreadSet(source_pixels); transform=DestroyTransformThreadSet(transform); if (source_profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(source_profile); if (target_profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(target_profile); return(MagickFalse); } if (target_colorspace == CMYKColorspace) (void) SetImageColorspace(image,target_colorspace); progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register IndexPacket *magick_restrict indexes; register LCMSType *p; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); p=source_pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) { *p++=LCMSScaleSource(GetPixelRed(q)); if (source_channels > 1) { *p++=LCMSScaleSource(GetPixelGreen(q)); *p++=LCMSScaleSource(GetPixelBlue(q)); } if (source_channels > 3) *p++=LCMSScaleSource(GetPixelIndex(indexes+x)); q++; } 
cmsDoTransform(transform[id],source_pixels[id],target_pixels[id], (unsigned int) image->columns); p=target_pixels[id]; q-=image->columns; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,LCMSScaleTarget(*p)); SetPixelGreen(q,GetPixelRed(q)); SetPixelBlue(q,GetPixelRed(q)); p++; if (target_channels > 1) { SetPixelGreen(q,LCMSScaleTarget(*p)); p++; SetPixelBlue(q,LCMSScaleTarget(*p)); p++; } if (target_channels > 3) { SetPixelIndex(indexes+x,LCMSScaleTarget(*p)); p++; } q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ProfileImage) #endif proceed=SetImageProgress(image,ProfileImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); (void) SetImageColorspace(image,target_colorspace); switch (signature) { case cmsSigRgbData: { image->type=image->matte == MagickFalse ? TrueColorType : TrueColorMatteType; break; } case cmsSigCmykData: { image->type=image->matte == MagickFalse ? ColorSeparationType : ColorSeparationMatteType; break; } case cmsSigGrayData: { image->type=image->matte == MagickFalse ? 
GrayscaleType : GrayscaleMatteType;
              break;
            }
            default:
              break;
          }
          target_pixels=DestroyPixelThreadSet(target_pixels);
          source_pixels=DestroyPixelThreadSet(source_pixels);
          transform=DestroyTransformThreadSet(transform);
          /* a device-link profile transforms pixels but is not stored */
          if ((status != MagickFalse) &&
              (cmsGetDeviceClass(source_profile) != cmsSigLinkClass))
            status=SetImageProfile(image,name,profile);
          if (target_profile != (cmsHPROFILE) NULL)
            (void) cmsCloseProfile(target_profile);
        }
      (void) cmsCloseProfile(source_profile);
    }
#endif
  }
  profile=DestroyStringInfo(profile);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m o v e I m a g e P r o f i l e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemoveImageProfile() removes a named profile from the image and returns
%  its value.
%
%  The format of the RemoveImageProfile method is:
%
%      void *RemoveImageProfile(Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  if (LocaleCompare(name,"icc") == 0)
    {
      /*
        Continue to support deprecated color profile for now.
      */
      image->color_profile.length=0;
      image->color_profile.info=(unsigned char *) NULL;
    }
  if (LocaleCompare(name,"iptc") == 0)
    {
      /*
        Continue to support deprecated IPTC profile for now.
      */
      image->iptc_profile.length=0;
      image->iptc_profile.info=(unsigned char *) NULL;
    }
  /* also scrub the matching record from the 8BIM wrapper profile, if any */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
    image->profiles,name);
  return(profile);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t P r o f i l e I t e r a t o r                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImageProfileIterator() resets the image profile iterator.  Use it in
%  conjunction with GetNextImageProfile() to iterate over all the profiles
%  associated with an image.
%
%  The format of the ResetImageProfileIterator method is:
%
%      ResetImageProfileIterator(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return;
  ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e P r o f i l e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageProfile() adds a named profile to the image.  If a profile with the
%  same name already exists, it is replaced.  This method differs from the
%  ProfileImage() method in that it does not apply CMS color profiles.
%
%  The format of the SetImageProfile method is:
%
%      MagickBooleanType SetImageProfile(Image *image,const char *name,
%        const StringInfo *profile)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name, for example icc, exif, and 8bim (8bim is the
%      Photoshop wrapper for iptc profiles).
%
%    o profile: A StringInfo structure that contains the named profile.
%
*/

/* Splay-tree value destructor for stored profiles. */
static void *DestroyProfile(void *profile)
{
  return((void *) DestroyStringInfo((StringInfo *) profile));
}

/* Read one byte from p into *quantum; returns the advanced cursor. */
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
  unsigned char *quantum)
{
  *quantum=(*p++);
  return(p);
}

/* Read a big-endian 32-bit value from p; returns the advanced cursor. */
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
  unsigned int *quantum)
{
  *quantum=(unsigned int) (*p++) << 24;
  *quantum|=(unsigned int) (*p++) << 16;
  *quantum|=(unsigned int) (*p++) << 8;
  *quantum|=(unsigned int) (*p++);
  return(p);
}

/* Read a big-endian 16-bit value from p; returns the advanced cursor. */
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
  unsigned short *quantum)
{
  *quantum=(unsigned short) (*p++) << 8;
  *quantum|=(unsigned short) (*p++);
  return(p);
}

/* Write quantum to p as a big-endian 32-bit value. */
static inline void WriteResourceLong(unsigned char *p,
  const unsigned int quantum)
{
  unsigned char
    buffer[4];

  buffer[0]=(unsigned char) (quantum >> 24);
  buffer[1]=(unsigned char) (quantum >> 16);
  buffer[2]=(unsigned char) (quantum >> 8);
  buffer[3]=(unsigned char) quantum;
  (void) memcpy(p,buffer,4);
}

/*
  Rewrite the image's "8bim" meta profile so that the resource record holding
  the named profile (icc/iptc/xmp only) matches `profile`.  A NULL profile
  removes the record.  Record layout, as parsed below: "8BIM" signature,
  16-bit resource id, Pascal-string name padded to even length, 32-bit data
  length, then the (even-padded) data.
*/
static void WriteTo8BimProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  const unsigned char
    *datum,
    *q;

  register const unsigned char
    *p;

  size_t
    length;

  StringInfo
    *profile_8bim;

  ssize_t
    count;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id,
    profile_id;

  /*
    Map the named profile to its Photoshop resource id; other names have no
    8BIM representation and are ignored.
  */
  if (LocaleCompare(name,"icc") == 0)
    profile_id=0x040f;
  else
    if (LocaleCompare(name,"iptc") == 0)
      profile_id=0x0404;
    else
      if (LocaleCompare(name,"xmp") == 0)
        profile_id=0x0424;
      else
        return;
  profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,"8bim");
  if (profile_8bim == (StringInfo *) NULL)
    return;
  datum=GetStringInfoDatum(profile_8bim);
  length=GetStringInfoLength(profile_8bim);
  for (p=datum; p < (datum+length-16); )
  {
    q=p;  /* start of the current resource record */
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;  /* name is padded to an even byte count */
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    if ((count & 0x01) != 0)
      count++;  /* data is padded to an even byte count */
    if ((count < 0) || (p > (datum+length-count)) ||
        (count > (ssize_t) length))
      break;
    if (id != profile_id)
      p+=count;
    else
      {
        size_t
          extent,
          offset;

        ssize_t
          extract_extent;

        StringInfo
          *extract_profile;

        /*
          Rebuild the 8BIM block: bytes before the record, optionally the
          replacement payload (with patched length field), bytes after.
        */
        extract_extent=0;
        extent=(datum+length)-(p+count);
        if (profile == (StringInfo *) NULL)
          {
            /* Remove: copy everything up to the record start only. */
            offset=(q-datum);
            extract_profile=AcquireStringInfo(offset+extent);
            (void) memcpy(extract_profile->datum,datum,offset);
          }
        else
          {
            /* Replace: keep the header, patch the 4-byte length field. */
            offset=(p-datum);
            extract_extent=profile->length;
            if ((extract_extent & 0x01) != 0)
              extract_extent++;
            extract_profile=AcquireStringInfo(offset+extract_extent+extent);
            (void) memcpy(extract_profile->datum,datum,offset-4);
            WriteResourceLong(extract_profile->datum+offset-4,(unsigned int)
              profile->length);
            (void) memcpy(extract_profile->datum+offset,
              profile->datum,profile->length);
          }
        (void) memcpy(extract_profile->datum+offset+extract_extent,
          p+count,extent);
        (void) AddValueToSplayTree((SplayTreeInfo *) image->profiles,
          ConstantString("8bim"),CloneStringInfo(extract_profile));
        extract_profile=DestroyStringInfo(extract_profile);
        break;
      }
  }
}

/*
  Walk a Photoshop 8BIM resource block and unpack the resources ImageMagick
  understands: resolution (0x03ed) into image->x/y_resolution, and embedded
  iptc/icc/exif/xmp profiles, which are re-registered via
  SetImageProfileInternal with recursive=MagickTrue to avoid writing them
  back into the 8BIM wrapper.
*/
static void GetProfilesFromResourceBlock(Image *image,
  const StringInfo *resource_block)
{
  const unsigned char
    *datum;

  register const unsigned char
    *p;

  size_t
    length;

  ssize_t
    count;

  StringInfo
    *profile;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id;

  datum=GetStringInfoDatum(resource_block);
  length=GetStringInfoLength(resource_block);
  for (p=datum; p < (datum+length-16); )
  {
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;  /* Pascal-string name is even-padded */
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    if ((p > (datum+length-count)) || (count > (ssize_t) length) ||
        (count < 0))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned int
          resolution;

        unsigned short
          units;

        /*
          Resolution.
        */
        if (count < 10)
          break;
        p=ReadResourceLong(p,&resolution);
        image->x_resolution=((double) resolution)/65536.0;
        p=ReadResourceShort(p,&units)+2;
        p=ReadResourceLong(p,&resolution)+4;
        image->y_resolution=((double) resolution)/65536.0;
        /*
          Values are always stored as pixels per inch.
        */
        if ((ResolutionType) units != PixelsPerCentimeterResolution)
          image->units=PixelsPerInchResolution;
        else
          {
            image->units=PixelsPerCentimeterResolution;
            image->x_resolution/=2.54;
            image->y_resolution/=2.54;
          }
        break;
      }
      case 0x0404:
      {
        /*
          IPTC Profile
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"iptc",profile,MagickTrue);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x040c:
      {
        /*
          Thumbnail.
        */
        p+=count;
        break;
      }
      case 0x040f:
      {
        /*
          ICC Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"icc",profile,MagickTrue);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0422:
      {
        /*
          EXIF Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"exif",profile,MagickTrue);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0424:
      {
        /*
          XMP Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"xmp",profile,MagickTrue);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    if ((count & 0x01) != 0)
      p++;  /* resource data is even-padded */
  }
}

/*
  Store `profile` under the lower-cased `name` in the image's profile tree
  and keep all dependent views consistent: the deprecated color/iptc profile
  members, the embedded 8BIM wrapper, and the image property namespace.
  `recursive` is MagickTrue only when called while unpacking an 8BIM block,
  which suppresses WriteTo8BimProfile and so breaks the mutual recursion
  between the named profile and its 8BIM copy.
*/
static MagickBooleanType SetImageProfileInternal(Image *image,const char *name,
  const StringInfo *profile,const MagickBooleanType recursive)
{
  char
    key[MaxTextExtent],
    property[MaxTextExtent];

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
      DestroyProfile);
  (void) CopyMagickString(key,name,MaxTextExtent);
  LocaleLower(key);
  status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
    ConstantString(key),CloneStringInfo(profile));
  if ((status != MagickFalse) &&
      ((LocaleCompare(name,"icc") == 0) || (LocaleCompare(name,"icm") == 0)))
    {
      const StringInfo
        *icc_profile;

      /*
        Continue to support deprecated color profile member.
      */
      icc_profile=GetImageProfile(image,name);
      if (icc_profile != (const StringInfo *) NULL)
        {
          image->color_profile.length=GetStringInfoLength(icc_profile);
          image->color_profile.info=GetStringInfoDatum(icc_profile);
        }
    }
  if ((status != MagickFalse) &&
      ((LocaleCompare(name,"iptc") == 0) || (LocaleCompare(name,"8bim") == 0)))
    {
      const StringInfo
        *iptc_profile;

      /*
        Continue to support deprecated IPTC profile member.
      */
      iptc_profile=GetImageProfile(image,name);
      if (iptc_profile != (const StringInfo *) NULL)
        {
          image->iptc_profile.length=GetStringInfoLength(iptc_profile);
          image->iptc_profile.info=GetStringInfoDatum(iptc_profile);
        }
    }
  if (status != MagickFalse)
    {
      if (LocaleCompare(name,"8bim") == 0)
        GetProfilesFromResourceBlock(image,profile);
      else
        if (recursive == MagickFalse)
          WriteTo8BimProfile(image,name,profile);
    }
  /*
    Inject profile into image properties.
  */
  (void) FormatLocaleString(property,MaxTextExtent,"%s:*",name);
  (void) GetImageProperty(image,property);
  return(status);
}

MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  return(SetImageProfileInternal(image,name,profile,MagickFalse));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c I m a g e P r o f i l e s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImageProfiles() synchronizes image properties with the image profiles.
%  Currently we only support updating the EXIF resolution and orientation.
%
%  The format of the SyncImageProfiles method is:
%
%      MagickBooleanType SyncImageProfiles(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/

/* Consume one byte from the cursor, decrementing *length; EOF when empty. */
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
  int
    c;

  if (*length < 1)
    return(EOF);
  c=(int) (*(*p)++);
  (*length)--;
  return(c);
}

/* Decode a 16-bit value from buffer in the given byte order (no advance). */
static inline signed short ReadProfileShort(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned short
    value;

  if (endian == LSBEndian)
    {
      value=(unsigned short) buffer[1] << 8;
      value|=(unsigned short) buffer[0];
      quantum.unsigned_value=value & 0xffff;
      return(quantum.signed_value);
    }
  value=(unsigned short) buffer[0] << 8;
  value|=(unsigned short) buffer[1];
  quantum.unsigned_value=value & 0xffff;
  return(quantum.signed_value);
}

/* Decode a 32-bit value from buffer in the given byte order (no advance). */
static inline signed int ReadProfileLong(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned int
    value;

  if (endian == LSBEndian)
    {
      value=(unsigned int) buffer[3] << 24;
      value|=(unsigned int) buffer[2] << 16;
      value|=(unsigned int) buffer[1] << 8;
      value|=(unsigned int) buffer[0];
      quantum.unsigned_value=value & 0xffffffff;
      return(quantum.signed_value);
    }
  value=(unsigned int) buffer[0] << 24;
  value|=(unsigned int) buffer[1] << 16;
  value|=(unsigned int) buffer[2] << 8;
  value|=(unsigned int) buffer[3];
  quantum.unsigned_value=value & 0xffffffff;
  return(quantum.signed_value);
}

/* Consume a big-endian 32-bit value from the cursor; 0 if too few bytes. */
static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length)
{
  signed int
    value;

  if (*length < 4)
    return(0);
  value=ReadProfileLong(MSBEndian,*p);
  (*length)-=4;
  *p+=4;
  return(value);
}

/* Consume a big-endian 16-bit value from the cursor; 0 if too few bytes. */
static inline signed short ReadProfileMSBShort(unsigned char **p,
  size_t *length)
{
  signed short
    value;

  if (*length < 2)
    return(0);
  value=ReadProfileShort(MSBEndian,*p);
  (*length)-=2;
  *p+=2;
  return(value);
}

/* Encode value at p as a 32-bit quantity in the given byte order. */
static inline void WriteProfileLong(const EndianType endian,
  const size_t value,unsigned char *p)
{
  unsigned char
    buffer[4];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      buffer[2]=(unsigned char) (value >> 16);
      buffer[3]=(unsigned char) (value >> 24);
      (void) memcpy(p,buffer,4);
      return;
    }
  buffer[0]=(unsigned char) (value >> 24);
  buffer[1]=(unsigned char) (value >> 16);
  buffer[2]=(unsigned char) (value >> 8);
  buffer[3]=(unsigned char) value;
  (void) memcpy(p,buffer,4);
}

/* Encode value at p as a 16-bit quantity in the given byte order. */
static void WriteProfileShort(const EndianType endian,
  const unsigned short value,unsigned char *p)
{
  unsigned char
    buffer[2];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      (void) memcpy(p,buffer,2);
      return;
    }
  buffer[0]=(unsigned char) (value >> 8);
  buffer[1]=(unsigned char) value;
  (void) memcpy(p,buffer,2);
}

/*
  Patch the resolution resource (id 0x03ED) inside an 8BIM profile, in place,
  with the image's current x/y resolution and units.  Scans for the "8BIM"
  signature byte-by-byte; returns MagickFalse on a malformed block.
*/
static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile)
{
  size_t
    length;

  ssize_t
    count;

  unsigned char
    *p;

  unsigned short
    id;

  length=GetStringInfoLength(profile);
  p=GetStringInfoDatum(profile);
  while (length != 0)
  {
    /* Match the literal bytes '8','B','I','M'. */
    if (ReadProfileByte(&p,&length) != 0x38)
      continue;
    if (ReadProfileByte(&p,&length) != 0x42)
      continue;
    if (ReadProfileByte(&p,&length) != 0x49)
      continue;
    if (ReadProfileByte(&p,&length) != 0x4D)
      continue;
    if (length < 7)
      return(MagickFalse);
    id=ReadProfileMSBShort(&p,&length);
    count=(ssize_t) ReadProfileByte(&p,&length);
    if ((count >= (ssize_t) length) || (count < 0))
      return(MagickFalse);
    p+=count;  /* skip the Pascal-string resource name */
    length-=count;
    if ((*p & 0x01) == 0)
      (void) ReadProfileByte(&p,&length);  /* even padding byte */
    count=(ssize_t) ReadProfileMSBLong(&p,&length);
    if ((count > (ssize_t) length) || (count < 0))
      return(MagickFalse);
    if ((id == 0x3ED) && (count == 16))
      {
        /* Resolution is stored as a 16.16 fixed-point pixels-per-inch. */
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) (image->x_resolution*2.54*
            65536.0),p);
        else
          WriteProfileLong(MSBEndian,(unsigned int) (image->x_resolution*
            65536.0),p);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4);
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) (image->y_resolution*2.54*
            65536.0),p+8);
        else
          WriteProfileLong(MSBEndian,(unsigned int) (image->y_resolution*
            65536.0),p+8);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12);
      }
    p+=count;
    length-=count;
  }
  return(MagickTrue);
}

/*
  Patch the EXIF profile, in place, so its resolution (0x011a/0x011b),
  orientation (0x0112) and resolution-unit (0x0128) tags match the image.
  Walks the TIFF IFD chain iteratively with an explicit directory stack;
  a splay tree of visited entries guards against looping directories.
*/
static MagickBooleanType SyncExifProfile(Image *image, StringInfo *profile)
{
#define MaxDirectoryStack  16
#define EXIF_DELIMITER  "\n"
#define EXIF_NUM_FORMATS  12
#define TAG_EXIF_OFFSET  0x8769
#define TAG_INTEROP_OFFSET  0xa005

  typedef struct _DirectoryInfo
  {
    unsigned char
      *directory;

    size_t
      entry;
  } DirectoryInfo;

  DirectoryInfo
    directory_stack[MaxDirectoryStack];

  EndianType
    endian;

  size_t
    entry,
    length,
    number_entries;

  SplayTreeInfo
    *exif_resources;

  ssize_t
    id,
    level,
    offset;

  /* Bytes per element for each TIFF data format (1-indexed by format). */
  static int
    format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};

  unsigned char
    *directory,
    *exif;

  /*
    Set EXIF resolution tag.
  */
  length=GetStringInfoLength(profile);
  exif=GetStringInfoDatum(profile);
  if (length < 16)
    return(MagickFalse);
  id=(ssize_t) ReadProfileShort(LSBEndian,exif);
  if ((id != 0x4949) && (id != 0x4D4D))
    {
      /*
        No TIFF header at the start: skip past an "Exif\0\0" preamble.
      */
      while (length != 0)
      {
        if (ReadProfileByte(&exif,&length) != 0x45)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x78)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x69)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x66)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        break;
      }
      if (length < 16)
        return(MagickFalse);
      id=(ssize_t) ReadProfileShort(LSBEndian,exif);
    }
  endian=LSBEndian;
  if (id == 0x4949)
    endian=LSBEndian;   /* "II": Intel, little-endian */
  else
    if (id == 0x4D4D)
      endian=MSBEndian; /* "MM": Motorola, big-endian */
    else
      return(MagickFalse);
  if (ReadProfileShort(endian,exif+2) != 0x002a)
    return(MagickFalse);
  /*
    This the offset to the first IFD.
  */
  offset=(ssize_t) ReadProfileLong(endian,exif+4);
  if ((offset < 0) || ((size_t) offset >= length))
    return(MagickFalse);
  directory=exif+offset;
  level=0;
  entry=0;
  exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
    (void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
  do
  {
    if (level > 0)
      {
        level--;
        directory=directory_stack[level].directory;
        entry=directory_stack[level].entry;
      }
    if ((directory < exif) || (directory > (exif+length-2)))
      break;
    /*
      Determine how many entries there are in the current IFD.
    */
    number_entries=ReadProfileShort(endian,directory);
    for ( ; entry < number_entries; entry++)
    {
      int
        components;

      register unsigned char
        *p,
        *q;

      size_t
        number_bytes;

      ssize_t
        format,
        tag_value;

      q=(unsigned char *) (directory+2+(12*entry));
      if (q > (exif+length-12))
        break;  /* corrupt EXIF */
      if (GetValueFromSplayTree(exif_resources,q) == q)
        break;  /* already visited: directory loop */
      (void) AddValueToSplayTree(exif_resources,q,q);
      tag_value=(ssize_t) ReadProfileShort(endian,q);
      format=(ssize_t) ReadProfileShort(endian,q+2);
      if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS))
        break;
      components=(int) ReadProfileLong(endian,q+4);
      if (components < 0)
        break;  /* corrupt EXIF */
      number_bytes=(size_t) components*format_bytes[format];
      if ((ssize_t) number_bytes < components)
        break;  /* prevent overflow */
      if (number_bytes <= 4)
        p=q+8;  /* value is stored inline in the entry */
      else
        {
          /*
            The directory entry contains an offset.
          */
          offset=(ssize_t) ReadProfileLong(endian,q+8);
          if ((offset < 0) || ((size_t) (offset+number_bytes) > length))
            continue;
          if (~length < number_bytes)
            continue;  /* prevent overflow */
          p=(unsigned char *) (exif+offset);
        }
      switch (tag_value)
      {
        case 0x011a:
        {
          /* XResolution: rational value/1, rounded. */
          (void) WriteProfileLong(endian,(size_t) (image->x_resolution+0.5),p);
          (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x011b:
        {
          /* YResolution: rational value/1, rounded. */
          (void) WriteProfileLong(endian,(size_t) (image->y_resolution+0.5),p);
          (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x0112:
        {
          /* Orientation. */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) image->orientation,p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) image->orientation,
            p);
          break;
        }
        case 0x0128:
        {
          /* ResolutionUnit: EXIF units are offset by one from ours. */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) (image->units+1),p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
          break;
        }
        default:
          break;
      }
      if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
        {
          /*
            Descend into the sub-IFD; also queue the next chained IFD.
          */
          offset=(ssize_t) ReadProfileLong(endian,p);
          if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
            {
              directory_stack[level].directory=directory;
              entry++;
              directory_stack[level].entry=entry;
              level++;
              directory_stack[level].directory=exif+offset;
              directory_stack[level].entry=0;
              level++;
              if ((directory+2+(12*number_entries)) > (exif+length))
                break;
              offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
                number_entries));
              if ((offset != 0) && ((size_t) offset < length) &&
                  (level < (MaxDirectoryStack-2)))
                {
                  directory_stack[level].directory=exif+offset;
                  directory_stack[level].entry=0;
                  level++;
                }
            }
          break;
        }
    }
  } while (level > 0);
  exif_resources=DestroySplayTree(exif_resources);
  return(MagickTrue);
}

MagickExport MagickBooleanType SyncImageProfiles(Image *image)
{
  MagickBooleanType
    status;

  StringInfo
    *profile;

  status=MagickTrue;
  profile=(StringInfo *) GetImageProfile(image,"8BIM");
  if (profile != (StringInfo *) NULL)
    if (Sync8BimProfile(image,profile) == MagickFalse)
      status=MagickFalse;
  profile=(StringInfo *) GetImageProfile(image,"EXIF");
  if (profile != (StringInfo *) NULL)
    if (SyncExifProfile(image,profile) == MagickFalse)
      status=MagickFalse;
  return(status);
}
/* ==== grandi_cpu.c ==== */
#include <math.h> #include <stdint.h> #include <string.h> enum state { STATE_m, STATE_h, STATE_j, STATE_x_kr, STATE_x_ks, STATE_x_to_s, STATE_y_to_s, STATE_x_to_f, STATE_y_to_f, STATE_d, STATE_f, STATE_f_Ca_Bj, STATE_f_Ca_Bsl, STATE_Ry_Rr, STATE_Ry_Ro, STATE_Ry_Ri, STATE_Na_Bj, STATE_Na_Bsl, STATE_Tn_CL, STATE_Tn_CHc, STATE_Tn_CHm, STATE_CaM, STATE_Myo_c, STATE_Myo_m, STATE_SRB, STATE_SLL_j, STATE_SLL_sl, STATE_SLH_j, STATE_SLH_sl, STATE_Csqn_b, STATE_Ca_sr, STATE_Na_j, STATE_Na_sl, STATE_Na_i, STATE_K_i, STATE_Ca_j, STATE_Ca_sl, STATE_Ca_i, STATE_V_m, NUM_STATES, }; enum parameter { PARAM_Fjunc, PARAM_Fjunc_CaL, PARAM_cellLength, PARAM_cellRadius, PARAM_distJuncSL, PARAM_distSLcyto, PARAM_junctionLength, PARAM_junctionRadius, PARAM_GNa, PARAM_GNaB, PARAM_IbarNaK, PARAM_KmKo, PARAM_KmNaip, PARAM_Q10KmNai, PARAM_Q10NaK, PARAM_GKr, PARAM_GKp, PARAM_GKs, PARAM_pNaK, PARAM_GK1, PARAM_Gto, PARAM_epi, PARAM_GClB, PARAM_GClCa, PARAM_KdClCa, PARAM_GCaL, PARAM_Q10CaL, PARAM_pCa, PARAM_pK, PARAM_pNa, PARAM_IbarNCX, PARAM_Kdact, PARAM_KmCai, PARAM_KmCao, PARAM_KmNai, PARAM_KmNao, PARAM_Q10NCX, PARAM_ksat, PARAM_nu, PARAM_IbarSLCaP, PARAM_KmPCa, PARAM_Q10SLCaP, PARAM_GCaB, PARAM_Kmf, PARAM_Kmr, PARAM_MaxSR, PARAM_MinSR, PARAM_Q10SRCaP, PARAM_Vmax_SRCaP, PARAM_ec50SR, PARAM_hillSRCaP, PARAM_kiCa, PARAM_kim, PARAM_koCa, PARAM_kom, PARAM_ks, PARAM_Bmax_Naj, PARAM_Bmax_Nasl, PARAM_koff_na, PARAM_kon_na, PARAM_Bmax_CaM, PARAM_Bmax_SR, PARAM_Bmax_TnChigh, PARAM_Bmax_TnClow, PARAM_Bmax_myosin, PARAM_koff_cam, PARAM_koff_myoca, PARAM_koff_myomg, PARAM_koff_sr, PARAM_koff_tnchca, PARAM_koff_tnchmg, PARAM_koff_tncl, PARAM_kon_cam, PARAM_kon_myoca, PARAM_kon_myomg, PARAM_kon_sr, PARAM_kon_tnchca, PARAM_kon_tnchmg, PARAM_kon_tncl, PARAM_Bmax_SLhighj0, PARAM_Bmax_SLhighsl0, PARAM_Bmax_SLlowj0, PARAM_Bmax_SLlowsl0, PARAM_koff_slh, PARAM_koff_sll, PARAM_kon_slh, PARAM_kon_sll, PARAM_Bmax_Csqn0, PARAM_DcaJuncSL, PARAM_DcaSLcyto, PARAM_J_ca_juncsl, PARAM_J_ca_slmyo, PARAM_koff_csqn, 
PARAM_kon_csqn, PARAM_DnaJuncSL, PARAM_DnaSLcyto, PARAM_J_na_juncsl, PARAM_J_na_slmyo, PARAM_Nao, PARAM_Ko, PARAM_Cao, PARAM_Cli, PARAM_Clo, PARAM_Mgi, PARAM_Cmem, PARAM_Frdy, PARAM_R, PARAM_Temp, PARAM_stim_amplitude, PARAM_stim_duration, PARAM_stim_period, PARAM_stim_start, NUM_PARAMS, }; // State index int state_index(const char name[]) { if (strcmp(name, "m") == 0) { return STATE_m; } else if (strcmp(name, "h") == 0) { return STATE_h; } else if (strcmp(name, "j") == 0) { return STATE_j; } else if (strcmp(name, "x_kr") == 0) { return STATE_x_kr; } else if (strcmp(name, "x_ks") == 0) { return STATE_x_ks; } else if (strcmp(name, "x_to_s") == 0) { return STATE_x_to_s; } else if (strcmp(name, "y_to_s") == 0) { return STATE_y_to_s; } else if (strcmp(name, "x_to_f") == 0) { return STATE_x_to_f; } else if (strcmp(name, "y_to_f") == 0) { return STATE_y_to_f; } else if (strcmp(name, "d") == 0) { return STATE_d; } else if (strcmp(name, "f") == 0) { return STATE_f; } else if (strcmp(name, "f_Ca_Bj") == 0) { return STATE_f_Ca_Bj; } else if (strcmp(name, "f_Ca_Bsl") == 0) { return STATE_f_Ca_Bsl; } else if (strcmp(name, "Ry_Rr") == 0) { return STATE_Ry_Rr; } else if (strcmp(name, "Ry_Ro") == 0) { return STATE_Ry_Ro; } else if (strcmp(name, "Ry_Ri") == 0) { return STATE_Ry_Ri; } else if (strcmp(name, "Na_Bj") == 0) { return STATE_Na_Bj; } else if (strcmp(name, "Na_Bsl") == 0) { return STATE_Na_Bsl; } else if (strcmp(name, "Tn_CL") == 0) { return STATE_Tn_CL; } else if (strcmp(name, "Tn_CHc") == 0) { return STATE_Tn_CHc; } else if (strcmp(name, "Tn_CHm") == 0) { return STATE_Tn_CHm; } else if (strcmp(name, "CaM") == 0) { return STATE_CaM; } else if (strcmp(name, "Myo_c") == 0) { return STATE_Myo_c; } else if (strcmp(name, "Myo_m") == 0) { return STATE_Myo_m; } else if (strcmp(name, "SRB") == 0) { return STATE_SRB; } else if (strcmp(name, "SLL_j") == 0) { return STATE_SLL_j; } else if (strcmp(name, "SLL_sl") == 0) { return STATE_SLL_sl; } else if (strcmp(name, "SLH_j") == 0) { 
return STATE_SLH_j; } else if (strcmp(name, "SLH_sl") == 0) { return STATE_SLH_sl; } else if (strcmp(name, "Csqn_b") == 0) { return STATE_Csqn_b; } else if (strcmp(name, "Ca_sr") == 0) { return STATE_Ca_sr; } else if (strcmp(name, "Na_j") == 0) { return STATE_Na_j; } else if (strcmp(name, "Na_sl") == 0) { return STATE_Na_sl; } else if (strcmp(name, "Na_i") == 0) { return STATE_Na_i; } else if (strcmp(name, "K_i") == 0) { return STATE_K_i; } else if (strcmp(name, "Ca_j") == 0) { return STATE_Ca_j; } else if (strcmp(name, "Ca_sl") == 0) { return STATE_Ca_sl; } else if (strcmp(name, "Ca_i") == 0) { return STATE_Ca_i; } else if (strcmp(name, "V_m") == 0) { return STATE_V_m; } return -1; } // Parameter index int parameter_index(const char name[]) { if (strcmp(name, "Fjunc") == 0) { return PARAM_Fjunc; } else if (strcmp(name, "Fjunc_CaL") == 0) { return PARAM_Fjunc_CaL; } else if (strcmp(name, "cellLength") == 0) { return PARAM_cellLength; } else if (strcmp(name, "cellRadius") == 0) { return PARAM_cellRadius; } else if (strcmp(name, "distJuncSL") == 0) { return PARAM_distJuncSL; } else if (strcmp(name, "distSLcyto") == 0) { return PARAM_distSLcyto; } else if (strcmp(name, "junctionLength") == 0) { return PARAM_junctionLength; } else if (strcmp(name, "junctionRadius") == 0) { return PARAM_junctionRadius; } else if (strcmp(name, "GNa") == 0) { return PARAM_GNa; } else if (strcmp(name, "GNaB") == 0) { return PARAM_GNaB; } else if (strcmp(name, "IbarNaK") == 0) { return PARAM_IbarNaK; } else if (strcmp(name, "KmKo") == 0) { return PARAM_KmKo; } else if (strcmp(name, "KmNaip") == 0) { return PARAM_KmNaip; } else if (strcmp(name, "Q10KmNai") == 0) { return PARAM_Q10KmNai; } else if (strcmp(name, "Q10NaK") == 0) { return PARAM_Q10NaK; } else if (strcmp(name, "GKr") == 0) { return PARAM_GKr; } else if (strcmp(name, "GKp") == 0) { return PARAM_GKp; } else if (strcmp(name, "GKs") == 0) { return PARAM_GKs; } else if (strcmp(name, "pNaK") == 0) { return PARAM_pNaK; } else if 
(strcmp(name, "GK1") == 0) { return PARAM_GK1; } else if (strcmp(name, "Gto") == 0) { return PARAM_Gto; } else if (strcmp(name, "epi") == 0) { return PARAM_epi; } else if (strcmp(name, "GClB") == 0) { return PARAM_GClB; } else if (strcmp(name, "GClCa") == 0) { return PARAM_GClCa; } else if (strcmp(name, "KdClCa") == 0) { return PARAM_KdClCa; } else if (strcmp(name, "GCaL") == 0) { return PARAM_GCaL; } else if (strcmp(name, "Q10CaL") == 0) { return PARAM_Q10CaL; } else if (strcmp(name, "pCa") == 0) { return PARAM_pCa; } else if (strcmp(name, "pK") == 0) { return PARAM_pK; } else if (strcmp(name, "pNa") == 0) { return PARAM_pNa; } else if (strcmp(name, "IbarNCX") == 0) { return PARAM_IbarNCX; } else if (strcmp(name, "Kdact") == 0) { return PARAM_Kdact; } else if (strcmp(name, "KmCai") == 0) { return PARAM_KmCai; } else if (strcmp(name, "KmCao") == 0) { return PARAM_KmCao; } else if (strcmp(name, "KmNai") == 0) { return PARAM_KmNai; } else if (strcmp(name, "KmNao") == 0) { return PARAM_KmNao; } else if (strcmp(name, "Q10NCX") == 0) { return PARAM_Q10NCX; } else if (strcmp(name, "ksat") == 0) { return PARAM_ksat; } else if (strcmp(name, "nu") == 0) { return PARAM_nu; } else if (strcmp(name, "IbarSLCaP") == 0) { return PARAM_IbarSLCaP; } else if (strcmp(name, "KmPCa") == 0) { return PARAM_KmPCa; } else if (strcmp(name, "Q10SLCaP") == 0) { return PARAM_Q10SLCaP; } else if (strcmp(name, "GCaB") == 0) { return PARAM_GCaB; } else if (strcmp(name, "Kmf") == 0) { return PARAM_Kmf; } else if (strcmp(name, "Kmr") == 0) { return PARAM_Kmr; } else if (strcmp(name, "MaxSR") == 0) { return PARAM_MaxSR; } else if (strcmp(name, "MinSR") == 0) { return PARAM_MinSR; } else if (strcmp(name, "Q10SRCaP") == 0) { return PARAM_Q10SRCaP; } else if (strcmp(name, "Vmax_SRCaP") == 0) { return PARAM_Vmax_SRCaP; } else if (strcmp(name, "ec50SR") == 0) { return PARAM_ec50SR; } else if (strcmp(name, "hillSRCaP") == 0) { return PARAM_hillSRCaP; } else if (strcmp(name, "kiCa") == 0) { return 
PARAM_kiCa; } else if (strcmp(name, "kim") == 0) { return PARAM_kim; } else if (strcmp(name, "koCa") == 0) { return PARAM_koCa; } else if (strcmp(name, "kom") == 0) { return PARAM_kom; } else if (strcmp(name, "ks") == 0) { return PARAM_ks; } else if (strcmp(name, "Bmax_Naj") == 0) { return PARAM_Bmax_Naj; } else if (strcmp(name, "Bmax_Nasl") == 0) { return PARAM_Bmax_Nasl; } else if (strcmp(name, "koff_na") == 0) { return PARAM_koff_na; } else if (strcmp(name, "kon_na") == 0) { return PARAM_kon_na; } else if (strcmp(name, "Bmax_CaM") == 0) { return PARAM_Bmax_CaM; } else if (strcmp(name, "Bmax_SR") == 0) { return PARAM_Bmax_SR; } else if (strcmp(name, "Bmax_TnChigh") == 0) { return PARAM_Bmax_TnChigh; } else if (strcmp(name, "Bmax_TnClow") == 0) { return PARAM_Bmax_TnClow; } else if (strcmp(name, "Bmax_myosin") == 0) { return PARAM_Bmax_myosin; } else if (strcmp(name, "koff_cam") == 0) { return PARAM_koff_cam; } else if (strcmp(name, "koff_myoca") == 0) { return PARAM_koff_myoca; } else if (strcmp(name, "koff_myomg") == 0) { return PARAM_koff_myomg; } else if (strcmp(name, "koff_sr") == 0) { return PARAM_koff_sr; } else if (strcmp(name, "koff_tnchca") == 0) { return PARAM_koff_tnchca; } else if (strcmp(name, "koff_tnchmg") == 0) { return PARAM_koff_tnchmg; } else if (strcmp(name, "koff_tncl") == 0) { return PARAM_koff_tncl; } else if (strcmp(name, "kon_cam") == 0) { return PARAM_kon_cam; } else if (strcmp(name, "kon_myoca") == 0) { return PARAM_kon_myoca; } else if (strcmp(name, "kon_myomg") == 0) { return PARAM_kon_myomg; } else if (strcmp(name, "kon_sr") == 0) { return PARAM_kon_sr; } else if (strcmp(name, "kon_tnchca") == 0) { return PARAM_kon_tnchca; } else if (strcmp(name, "kon_tnchmg") == 0) { return PARAM_kon_tnchmg; } else if (strcmp(name, "kon_tncl") == 0) { return PARAM_kon_tncl; } else if (strcmp(name, "Bmax_SLhighj0") == 0) { return PARAM_Bmax_SLhighj0; } else if (strcmp(name, "Bmax_SLhighsl0") == 0) { return PARAM_Bmax_SLhighsl0; } else if 
(strcmp(name, "Bmax_SLlowj0") == 0) { return PARAM_Bmax_SLlowj0; } else if (strcmp(name, "Bmax_SLlowsl0") == 0) { return PARAM_Bmax_SLlowsl0; } else if (strcmp(name, "koff_slh") == 0) { return PARAM_koff_slh; } else if (strcmp(name, "koff_sll") == 0) { return PARAM_koff_sll; } else if (strcmp(name, "kon_slh") == 0) { return PARAM_kon_slh; } else if (strcmp(name, "kon_sll") == 0) { return PARAM_kon_sll; } else if (strcmp(name, "Bmax_Csqn0") == 0) { return PARAM_Bmax_Csqn0; } else if (strcmp(name, "DcaJuncSL") == 0) { return PARAM_DcaJuncSL; } else if (strcmp(name, "DcaSLcyto") == 0) { return PARAM_DcaSLcyto; } else if (strcmp(name, "J_ca_juncsl") == 0) { return PARAM_J_ca_juncsl; } else if (strcmp(name, "J_ca_slmyo") == 0) { return PARAM_J_ca_slmyo; } else if (strcmp(name, "koff_csqn") == 0) { return PARAM_koff_csqn; } else if (strcmp(name, "kon_csqn") == 0) { return PARAM_kon_csqn; } else if (strcmp(name, "DnaJuncSL") == 0) { return PARAM_DnaJuncSL; } else if (strcmp(name, "DnaSLcyto") == 0) { return PARAM_DnaSLcyto; } else if (strcmp(name, "J_na_juncsl") == 0) { return PARAM_J_na_juncsl; } else if (strcmp(name, "J_na_slmyo") == 0) { return PARAM_J_na_slmyo; } else if (strcmp(name, "Nao") == 0) { return PARAM_Nao; } else if (strcmp(name, "Ko") == 0) { return PARAM_Ko; } else if (strcmp(name, "Cao") == 0) { return PARAM_Cao; } else if (strcmp(name, "Cli") == 0) { return PARAM_Cli; } else if (strcmp(name, "Clo") == 0) { return PARAM_Clo; } else if (strcmp(name, "Mgi") == 0) { return PARAM_Mgi; } else if (strcmp(name, "Cmem") == 0) { return PARAM_Cmem; } else if (strcmp(name, "Frdy") == 0) { return PARAM_Frdy; } else if (strcmp(name, "R") == 0) { return PARAM_R; } else if (strcmp(name, "Temp") == 0) { return PARAM_Temp; } else if (strcmp(name, "stim_amplitude") == 0) { return PARAM_stim_amplitude; } else if (strcmp(name, "stim_duration") == 0) { return PARAM_stim_duration; } else if (strcmp(name, "stim_period") == 0) { return PARAM_stim_period; } else if (strcmp(name, 
"stim_start") == 0) {
    return PARAM_stim_start;
  }
  // Unknown parameter name.
  return -1;
}

// Init state values
//
// Fill `states` (state-major layout: states[n_nodes * STATE_x + i]) with the
// model's default initial conditions for every node.  Parallelized over
// nodes with OpenMP (no-op without -fopenmp).
void init_state_values(double* states, uint64_t n_nodes)
{
  #pragma omp parallel for
  for (uint64_t i = 0; i < n_nodes; i++) {
    states[n_nodes * STATE_m + i] = 0.003793087414436;
    states[n_nodes * STATE_h + i] = 0.626221949492493;
    states[n_nodes * STATE_j + i] = 0.624553572490432;
    states[n_nodes * STATE_x_kr + i] = 0.0210022533039071;
    states[n_nodes * STATE_x_ks + i] = 0.00428016666258923;
    states[n_nodes * STATE_x_to_s + i] = 0.000440445885642567;
    states[n_nodes * STATE_y_to_s + i] = 0.785115828275182;
    states[n_nodes * STATE_x_to_f + i] = 0.000440438103758954;
    states[n_nodes * STATE_y_to_f + i] = 0.999995844038706;
    states[n_nodes * STATE_d + i] = 2.92407183949469e-06;
    states[n_nodes * STATE_f + i] = 0.995135796703515;
    states[n_nodes * STATE_f_Ca_Bj + i] = 0.0246760872105795;
    states[n_nodes * STATE_f_Ca_Bsl + i] = 0.0152723084239416;
    states[n_nodes * STATE_Ry_Rr + i] = 0.890806040818203;
    states[n_nodes * STATE_Ry_Ro + i] = 7.40481128853622e-07;
    states[n_nodes * STATE_Ry_Ri + i] = 9.07666168960848e-08;
    states[n_nodes * STATE_Na_Bj + i] = 3.4543773303328;
    states[n_nodes * STATE_Na_Bsl + i] = 0.753740951477775;
    states[n_nodes * STATE_Tn_CL + i] = 0.00893455096919132;
    states[n_nodes * STATE_Tn_CHc + i] = 0.117412025936615;
    states[n_nodes * STATE_Tn_CHm + i] = 0.0106160166692932;
    states[n_nodes * STATE_CaM + i] = 0.000295573424135051;
    states[n_nodes * STATE_Myo_c + i] = 0.00192322252438022;
    states[n_nodes * STATE_Myo_m + i] = 0.137560495022823;
    states[n_nodes * STATE_SRB + i] = 0.00217360235649355;
    states[n_nodes * STATE_SLL_j + i] = 0.00740524521680039;
    states[n_nodes * STATE_SLL_sl + i] = 0.00990339304377132;
    states[n_nodes * STATE_SLH_j + i] = 0.0735890020284214;
    states[n_nodes * STATE_SLH_sl + i] = 0.114583623436917;
    states[n_nodes * STATE_Csqn_b + i] = 1.19723145924432;
    states[n_nodes * STATE_Ca_sr + i] = 0.554760499828172;
    states[n_nodes * STATE_Na_j + i] = 8.40537012592918;
    states[n_nodes * STATE_Na_sl + i] = 8.40491910001025;
    states[n_nodes * STATE_Na_i + i] = 8.40513364344858;
    states[n_nodes * STATE_K_i + i] = 120.0;
    states[n_nodes * STATE_Ca_j + i] = 0.000175882395147342;
    states[n_nodes * STATE_Ca_sl + i] = 0.000106779509977354;
    states[n_nodes * STATE_Ca_i + i] = 8.72509677797499e-05;
    states[n_nodes * STATE_V_m + i] = -81.4552030512661;
  }
}

// Broadcast a single per-state vector `values` (indexed by the STATE_* enum)
// to every node of the state-major `states` array.
void init_state_values_2d_from_array(double* states, double *values,
                                     uint64_t n_nodes)
{
  #pragma omp parallel for
  for (uint64_t i = 0; i < n_nodes; i++) {
    states[n_nodes * STATE_m + i] = values[STATE_m];
    states[n_nodes * STATE_h + i] = values[STATE_h];
    states[n_nodes * STATE_j + i] = values[STATE_j];
    states[n_nodes * STATE_x_kr + i] = values[STATE_x_kr];
    states[n_nodes * STATE_x_ks + i] = values[STATE_x_ks];
    states[n_nodes * STATE_x_to_s + i] = values[STATE_x_to_s];
    states[n_nodes * STATE_y_to_s + i] = values[STATE_y_to_s];
    states[n_nodes * STATE_x_to_f + i] = values[STATE_x_to_f];
    states[n_nodes * STATE_y_to_f + i] = values[STATE_y_to_f];
    states[n_nodes * STATE_d + i] = values[STATE_d];
    states[n_nodes * STATE_f + i] = values[STATE_f];
    states[n_nodes * STATE_f_Ca_Bj + i] = values[STATE_f_Ca_Bj];
    states[n_nodes * STATE_f_Ca_Bsl + i] = values[STATE_f_Ca_Bsl];
    states[n_nodes * STATE_Ry_Rr + i] = values[STATE_Ry_Rr];
    states[n_nodes * STATE_Ry_Ro + i] = values[STATE_Ry_Ro];
    states[n_nodes * STATE_Ry_Ri + i] = values[STATE_Ry_Ri];
    states[n_nodes * STATE_Na_Bj + i] = values[STATE_Na_Bj];
    states[n_nodes * STATE_Na_Bsl + i] = values[STATE_Na_Bsl];
    states[n_nodes * STATE_Tn_CL + i] = values[STATE_Tn_CL];
    states[n_nodes * STATE_Tn_CHc + i] = values[STATE_Tn_CHc];
    states[n_nodes * STATE_Tn_CHm + i] = values[STATE_Tn_CHm];
    states[n_nodes * STATE_CaM + i] = values[STATE_CaM];
    states[n_nodes * STATE_Myo_c + i] = values[STATE_Myo_c];
    states[n_nodes * STATE_Myo_m + i] = values[STATE_Myo_m];
    states[n_nodes * STATE_SRB + i] = values[STATE_SRB];
    states[n_nodes * STATE_SLL_j + i] = values[STATE_SLL_j];
    states[n_nodes * STATE_SLL_sl + i] = values[STATE_SLL_sl];
    states[n_nodes * STATE_SLH_j + i] = values[STATE_SLH_j];
    states[n_nodes * STATE_SLH_sl + i] = values[STATE_SLH_sl];
    states[n_nodes * STATE_Csqn_b + i] = values[STATE_Csqn_b];
    states[n_nodes * STATE_Ca_sr + i] = values[STATE_Ca_sr];
    states[n_nodes * STATE_Na_j + i] = values[STATE_Na_j];
    states[n_nodes * STATE_Na_sl + i] = values[STATE_Na_sl];
    states[n_nodes * STATE_Na_i + i] = values[STATE_Na_i];
    states[n_nodes * STATE_K_i + i] = values[STATE_K_i];
    states[n_nodes * STATE_Ca_j + i] = values[STATE_Ca_j];
    states[n_nodes * STATE_Ca_sl + i] = values[STATE_Ca_sl];
    states[n_nodes * STATE_Ca_i + i] = values[STATE_Ca_i];
    states[n_nodes * STATE_V_m + i] = values[STATE_V_m];
  }
}

// Default parameter values
//
// Fill `parameters` (parameter-major layout: parameters[n_nodes * PARAM_x + i])
// with the model's published default constants for every node.
void init_parameter_values(double* parameters, uint64_t n_nodes)
{
  #pragma omp parallel for
  for (uint64_t i = 0; i < n_nodes; i++) {
    parameters[n_nodes * PARAM_Fjunc + i] = 0.11;
    parameters[n_nodes * PARAM_Fjunc_CaL + i] = 0.9;
    parameters[n_nodes * PARAM_cellLength + i] = 100.0;
    parameters[n_nodes * PARAM_cellRadius + i] = 10.25;
    parameters[n_nodes * PARAM_distJuncSL + i] = 0.5;
    parameters[n_nodes * PARAM_distSLcyto + i] = 0.45;
    parameters[n_nodes * PARAM_junctionLength + i] = 0.16;
    parameters[n_nodes * PARAM_junctionRadius + i] = 0.015;
    parameters[n_nodes * PARAM_GNa + i] = 23.0;
    parameters[n_nodes * PARAM_GNaB + i] = 0.000597;
    parameters[n_nodes * PARAM_IbarNaK + i] = 1.8;
    parameters[n_nodes * PARAM_KmKo + i] = 1.5;
    parameters[n_nodes * PARAM_KmNaip + i] = 11.0;
    parameters[n_nodes * PARAM_Q10KmNai + i] = 1.39;
    parameters[n_nodes * PARAM_Q10NaK + i] = 1.63;
    parameters[n_nodes * PARAM_GKr + i] = 0.035;
    parameters[n_nodes * PARAM_GKp + i] = 0.002;
    parameters[n_nodes * PARAM_GKs + i] = 0.0035;
    parameters[n_nodes * PARAM_pNaK + i] = 0.01833;
    parameters[n_nodes * PARAM_GK1 + i] = 0.35;
    parameters[n_nodes * PARAM_Gto + i] = 0.13;
    parameters[n_nodes * PARAM_epi + i] = 1.0;
    parameters[n_nodes * PARAM_GClB + i] = 0.009;
parameters[n_nodes * PARAM_GClCa + i] = 0.0548125; parameters[n_nodes * PARAM_KdClCa + i] = 0.1; parameters[n_nodes * PARAM_GCaL + i] = 0.5; parameters[n_nodes * PARAM_Q10CaL + i] = 1.8; parameters[n_nodes * PARAM_pCa + i] = 0.00054; parameters[n_nodes * PARAM_pK + i] = 2.7e-07; parameters[n_nodes * PARAM_pNa + i] = 1.5e-08; parameters[n_nodes * PARAM_IbarNCX + i] = 4.5; parameters[n_nodes * PARAM_Kdact + i] = 0.00015; parameters[n_nodes * PARAM_KmCai + i] = 0.00359; parameters[n_nodes * PARAM_KmCao + i] = 1.3; parameters[n_nodes * PARAM_KmNai + i] = 12.29; parameters[n_nodes * PARAM_KmNao + i] = 87.5; parameters[n_nodes * PARAM_Q10NCX + i] = 1.57; parameters[n_nodes * PARAM_ksat + i] = 0.32; parameters[n_nodes * PARAM_nu + i] = 0.27; parameters[n_nodes * PARAM_IbarSLCaP + i] = 0.0673; parameters[n_nodes * PARAM_KmPCa + i] = 0.0005; parameters[n_nodes * PARAM_Q10SLCaP + i] = 2.35; parameters[n_nodes * PARAM_GCaB + i] = 0.0005513; parameters[n_nodes * PARAM_Kmf + i] = 0.000246; parameters[n_nodes * PARAM_Kmr + i] = 1.7; parameters[n_nodes * PARAM_MaxSR + i] = 15.0; parameters[n_nodes * PARAM_MinSR + i] = 1.0; parameters[n_nodes * PARAM_Q10SRCaP + i] = 2.6; parameters[n_nodes * PARAM_Vmax_SRCaP + i] = 0.0053114; parameters[n_nodes * PARAM_ec50SR + i] = 0.45; parameters[n_nodes * PARAM_hillSRCaP + i] = 1.787; parameters[n_nodes * PARAM_kiCa + i] = 0.5; parameters[n_nodes * PARAM_kim + i] = 0.005; parameters[n_nodes * PARAM_koCa + i] = 10.0; parameters[n_nodes * PARAM_kom + i] = 0.06; parameters[n_nodes * PARAM_ks + i] = 25.0; parameters[n_nodes * PARAM_Bmax_Naj + i] = 7.561; parameters[n_nodes * PARAM_Bmax_Nasl + i] = 1.65; parameters[n_nodes * PARAM_koff_na + i] = 0.001; parameters[n_nodes * PARAM_kon_na + i] = 0.0001; parameters[n_nodes * PARAM_Bmax_CaM + i] = 0.024; parameters[n_nodes * PARAM_Bmax_SR + i] = 0.0171; parameters[n_nodes * PARAM_Bmax_TnChigh + i] = 0.14; parameters[n_nodes * PARAM_Bmax_TnClow + i] = 0.07; parameters[n_nodes * PARAM_Bmax_myosin + i] = 
0.14; parameters[n_nodes * PARAM_koff_cam + i] = 0.238; parameters[n_nodes * PARAM_koff_myoca + i] = 0.00046; parameters[n_nodes * PARAM_koff_myomg + i] = 5.7e-05; parameters[n_nodes * PARAM_koff_sr + i] = 0.06; parameters[n_nodes * PARAM_koff_tnchca + i] = 3.2e-05; parameters[n_nodes * PARAM_koff_tnchmg + i] = 0.00333; parameters[n_nodes * PARAM_koff_tncl + i] = 0.0196; parameters[n_nodes * PARAM_kon_cam + i] = 34.0; parameters[n_nodes * PARAM_kon_myoca + i] = 13.8; parameters[n_nodes * PARAM_kon_myomg + i] = 0.0157; parameters[n_nodes * PARAM_kon_sr + i] = 100.0; parameters[n_nodes * PARAM_kon_tnchca + i] = 2.37; parameters[n_nodes * PARAM_kon_tnchmg + i] = 0.003; parameters[n_nodes * PARAM_kon_tncl + i] = 32.7; parameters[n_nodes * PARAM_Bmax_SLhighj0 + i] = 0.000165; parameters[n_nodes * PARAM_Bmax_SLhighsl0 + i] = 0.0134; parameters[n_nodes * PARAM_Bmax_SLlowj0 + i] = 0.00046; parameters[n_nodes * PARAM_Bmax_SLlowsl0 + i] = 0.0374; parameters[n_nodes * PARAM_koff_slh + i] = 0.03; parameters[n_nodes * PARAM_koff_sll + i] = 1.3; parameters[n_nodes * PARAM_kon_slh + i] = 100.0; parameters[n_nodes * PARAM_kon_sll + i] = 100.0; parameters[n_nodes * PARAM_Bmax_Csqn0 + i] = 0.14; parameters[n_nodes * PARAM_DcaJuncSL + i] = 1.64e-06; parameters[n_nodes * PARAM_DcaSLcyto + i] = 1.22e-06; parameters[n_nodes * PARAM_J_ca_juncsl + i] = 8.2413e-13; parameters[n_nodes * PARAM_J_ca_slmyo + i] = 3.7243e-12; parameters[n_nodes * PARAM_koff_csqn + i] = 65.0; parameters[n_nodes * PARAM_kon_csqn + i] = 100.0; parameters[n_nodes * PARAM_DnaJuncSL + i] = 1.09e-05; parameters[n_nodes * PARAM_DnaSLcyto + i] = 1.79e-05; parameters[n_nodes * PARAM_J_na_juncsl + i] = 1.8313e-14; parameters[n_nodes * PARAM_J_na_slmyo + i] = 1.6386e-12; parameters[n_nodes * PARAM_Nao + i] = 140.0; parameters[n_nodes * PARAM_Ko + i] = 5.4; parameters[n_nodes * PARAM_Cao + i] = 1.8; parameters[n_nodes * PARAM_Cli + i] = 15.0; parameters[n_nodes * PARAM_Clo + i] = 150.0; parameters[n_nodes * PARAM_Mgi + i] = 
1.0; parameters[n_nodes * PARAM_Cmem + i] = 1.381e-10; parameters[n_nodes * PARAM_Frdy + i] = 96485.0; parameters[n_nodes * PARAM_R + i] = 8314.0; parameters[n_nodes * PARAM_Temp + i] = 310.0; parameters[n_nodes * PARAM_stim_amplitude + i] = 40.0; parameters[n_nodes * PARAM_stim_duration + i] = 1.0; parameters[n_nodes * PARAM_stim_period + i] = 1000.0; parameters[n_nodes * PARAM_stim_start + i] = 0.0; } } void init_parameter_values_2d_from_array(double* parameters, double *values, uint64_t n_nodes) { #pragma omp parallel for for (uint64_t i = 0; i < n_nodes; i++) { parameters[n_nodes * PARAM_Fjunc + i] = values[PARAM_Fjunc]; parameters[n_nodes * PARAM_Fjunc_CaL + i] = values[PARAM_Fjunc_CaL]; parameters[n_nodes * PARAM_cellLength + i] = values[PARAM_cellLength]; parameters[n_nodes * PARAM_cellRadius + i] = values[PARAM_cellRadius]; parameters[n_nodes * PARAM_distJuncSL + i] = values[PARAM_distJuncSL]; parameters[n_nodes * PARAM_distSLcyto + i] = values[PARAM_distSLcyto]; parameters[n_nodes * PARAM_junctionLength + i] = values[PARAM_junctionLength]; parameters[n_nodes * PARAM_junctionRadius + i] = values[PARAM_junctionRadius]; parameters[n_nodes * PARAM_GNa + i] = values[PARAM_GNa]; parameters[n_nodes * PARAM_GNaB + i] = values[PARAM_GNaB]; parameters[n_nodes * PARAM_IbarNaK + i] = values[PARAM_IbarNaK]; parameters[n_nodes * PARAM_KmKo + i] = values[PARAM_KmKo]; parameters[n_nodes * PARAM_KmNaip + i] = values[PARAM_KmNaip]; parameters[n_nodes * PARAM_Q10KmNai + i] = values[PARAM_Q10KmNai]; parameters[n_nodes * PARAM_Q10NaK + i] = values[PARAM_Q10NaK]; parameters[n_nodes * PARAM_GKr + i] = values[PARAM_GKr]; parameters[n_nodes * PARAM_GKp + i] = values[PARAM_GKp]; parameters[n_nodes * PARAM_GKs + i] = values[PARAM_GKs]; parameters[n_nodes * PARAM_pNaK + i] = values[PARAM_pNaK]; parameters[n_nodes * PARAM_GK1 + i] = values[PARAM_GK1]; parameters[n_nodes * PARAM_Gto + i] = values[PARAM_Gto]; parameters[n_nodes * PARAM_epi + i] = values[PARAM_epi]; parameters[n_nodes * 
PARAM_GClB + i] = values[PARAM_GClB]; parameters[n_nodes * PARAM_GClCa + i] = values[PARAM_GClCa]; parameters[n_nodes * PARAM_KdClCa + i] = values[PARAM_KdClCa]; parameters[n_nodes * PARAM_GCaL + i] = values[PARAM_GCaL]; parameters[n_nodes * PARAM_Q10CaL + i] = values[PARAM_Q10CaL]; parameters[n_nodes * PARAM_pCa + i] = values[PARAM_pCa]; parameters[n_nodes * PARAM_pK + i] = values[PARAM_pK]; parameters[n_nodes * PARAM_pNa + i] = values[PARAM_pNa]; parameters[n_nodes * PARAM_IbarNCX + i] = values[PARAM_IbarNCX]; parameters[n_nodes * PARAM_Kdact + i] = values[PARAM_Kdact]; parameters[n_nodes * PARAM_KmCai + i] = values[PARAM_KmCai]; parameters[n_nodes * PARAM_KmCao + i] = values[PARAM_KmCao]; parameters[n_nodes * PARAM_KmNai + i] = values[PARAM_KmNai]; parameters[n_nodes * PARAM_KmNao + i] = values[PARAM_KmNao]; parameters[n_nodes * PARAM_Q10NCX + i] = values[PARAM_Q10NCX]; parameters[n_nodes * PARAM_ksat + i] = values[PARAM_ksat]; parameters[n_nodes * PARAM_nu + i] = values[PARAM_nu]; parameters[n_nodes * PARAM_IbarSLCaP + i] = values[PARAM_IbarSLCaP]; parameters[n_nodes * PARAM_KmPCa + i] = values[PARAM_KmPCa]; parameters[n_nodes * PARAM_Q10SLCaP + i] = values[PARAM_Q10SLCaP]; parameters[n_nodes * PARAM_GCaB + i] = values[PARAM_GCaB]; parameters[n_nodes * PARAM_Kmf + i] = values[PARAM_Kmf]; parameters[n_nodes * PARAM_Kmr + i] = values[PARAM_Kmr]; parameters[n_nodes * PARAM_MaxSR + i] = values[PARAM_MaxSR]; parameters[n_nodes * PARAM_MinSR + i] = values[PARAM_MinSR]; parameters[n_nodes * PARAM_Q10SRCaP + i] = values[PARAM_Q10SRCaP]; parameters[n_nodes * PARAM_Vmax_SRCaP + i] = values[PARAM_Vmax_SRCaP]; parameters[n_nodes * PARAM_ec50SR + i] = values[PARAM_ec50SR]; parameters[n_nodes * PARAM_hillSRCaP + i] = values[PARAM_hillSRCaP]; parameters[n_nodes * PARAM_kiCa + i] = values[PARAM_kiCa]; parameters[n_nodes * PARAM_kim + i] = values[PARAM_kim]; parameters[n_nodes * PARAM_koCa + i] = values[PARAM_koCa]; parameters[n_nodes * PARAM_kom + i] = values[PARAM_kom]; 
parameters[n_nodes * PARAM_ks + i] = values[PARAM_ks]; parameters[n_nodes * PARAM_Bmax_Naj + i] = values[PARAM_Bmax_Naj]; parameters[n_nodes * PARAM_Bmax_Nasl + i] = values[PARAM_Bmax_Nasl]; parameters[n_nodes * PARAM_koff_na + i] = values[PARAM_koff_na]; parameters[n_nodes * PARAM_kon_na + i] = values[PARAM_kon_na]; parameters[n_nodes * PARAM_Bmax_CaM + i] = values[PARAM_Bmax_CaM]; parameters[n_nodes * PARAM_Bmax_SR + i] = values[PARAM_Bmax_SR]; parameters[n_nodes * PARAM_Bmax_TnChigh + i] = values[PARAM_Bmax_TnChigh]; parameters[n_nodes * PARAM_Bmax_TnClow + i] = values[PARAM_Bmax_TnClow]; parameters[n_nodes * PARAM_Bmax_myosin + i] = values[PARAM_Bmax_myosin]; parameters[n_nodes * PARAM_koff_cam + i] = values[PARAM_koff_cam]; parameters[n_nodes * PARAM_koff_myoca + i] = values[PARAM_koff_myoca]; parameters[n_nodes * PARAM_koff_myomg + i] = values[PARAM_koff_myomg]; parameters[n_nodes * PARAM_koff_sr + i] = values[PARAM_koff_sr]; parameters[n_nodes * PARAM_koff_tnchca + i] = values[PARAM_koff_tnchca]; parameters[n_nodes * PARAM_koff_tnchmg + i] = values[PARAM_koff_tnchmg]; parameters[n_nodes * PARAM_koff_tncl + i] = values[PARAM_koff_tncl]; parameters[n_nodes * PARAM_kon_cam + i] = values[PARAM_kon_cam]; parameters[n_nodes * PARAM_kon_myoca + i] = values[PARAM_kon_myoca]; parameters[n_nodes * PARAM_kon_myomg + i] = values[PARAM_kon_myomg]; parameters[n_nodes * PARAM_kon_sr + i] = values[PARAM_kon_sr]; parameters[n_nodes * PARAM_kon_tnchca + i] = values[PARAM_kon_tnchca]; parameters[n_nodes * PARAM_kon_tnchmg + i] = values[PARAM_kon_tnchmg]; parameters[n_nodes * PARAM_kon_tncl + i] = values[PARAM_kon_tncl]; parameters[n_nodes * PARAM_Bmax_SLhighj0 + i] = values[PARAM_Bmax_SLhighj0]; parameters[n_nodes * PARAM_Bmax_SLhighsl0 + i] = values[PARAM_Bmax_SLhighsl0]; parameters[n_nodes * PARAM_Bmax_SLlowj0 + i] = values[PARAM_Bmax_SLlowj0]; parameters[n_nodes * PARAM_Bmax_SLlowsl0 + i] = values[PARAM_Bmax_SLlowsl0]; parameters[n_nodes * PARAM_koff_slh + i] = 
values[PARAM_koff_slh]; parameters[n_nodes * PARAM_koff_sll + i] = values[PARAM_koff_sll]; parameters[n_nodes * PARAM_kon_slh + i] = values[PARAM_kon_slh]; parameters[n_nodes * PARAM_kon_sll + i] = values[PARAM_kon_sll]; parameters[n_nodes * PARAM_Bmax_Csqn0 + i] = values[PARAM_Bmax_Csqn0]; parameters[n_nodes * PARAM_DcaJuncSL + i] = values[PARAM_DcaJuncSL]; parameters[n_nodes * PARAM_DcaSLcyto + i] = values[PARAM_DcaSLcyto]; parameters[n_nodes * PARAM_J_ca_juncsl + i] = values[PARAM_J_ca_juncsl]; parameters[n_nodes * PARAM_J_ca_slmyo + i] = values[PARAM_J_ca_slmyo]; parameters[n_nodes * PARAM_koff_csqn + i] = values[PARAM_koff_csqn]; parameters[n_nodes * PARAM_kon_csqn + i] = values[PARAM_kon_csqn]; parameters[n_nodes * PARAM_DnaJuncSL + i] = values[PARAM_DnaJuncSL]; parameters[n_nodes * PARAM_DnaSLcyto + i] = values[PARAM_DnaSLcyto]; parameters[n_nodes * PARAM_J_na_juncsl + i] = values[PARAM_J_na_juncsl]; parameters[n_nodes * PARAM_J_na_slmyo + i] = values[PARAM_J_na_slmyo]; parameters[n_nodes * PARAM_Nao + i] = values[PARAM_Nao]; parameters[n_nodes * PARAM_Ko + i] = values[PARAM_Ko]; parameters[n_nodes * PARAM_Cao + i] = values[PARAM_Cao]; parameters[n_nodes * PARAM_Cli + i] = values[PARAM_Cli]; parameters[n_nodes * PARAM_Clo + i] = values[PARAM_Clo]; parameters[n_nodes * PARAM_Mgi + i] = values[PARAM_Mgi]; parameters[n_nodes * PARAM_Cmem + i] = values[PARAM_Cmem]; parameters[n_nodes * PARAM_Frdy + i] = values[PARAM_Frdy]; parameters[n_nodes * PARAM_R + i] = values[PARAM_R]; parameters[n_nodes * PARAM_Temp + i] = values[PARAM_Temp]; parameters[n_nodes * PARAM_stim_amplitude + i] = values[PARAM_stim_amplitude]; parameters[n_nodes * PARAM_stim_duration + i] = values[PARAM_stim_duration]; parameters[n_nodes * PARAM_stim_period + i] = values[PARAM_stim_period]; parameters[n_nodes * PARAM_stim_start + i] = values[PARAM_stim_start]; } } // Compute a forward step using the explicit Euler algorithm to the grandi ODE void FE(double *d_states, const double t, const double 
dt, const double *d_parameters, const uint64_t n_nodes) { #pragma omp parallel for for (uint64_t i = 0; i < n_nodes; i++) { // Assign states const double m = d_states[n_nodes * STATE_m + i]; const double h = d_states[n_nodes * STATE_h + i]; const double j = d_states[n_nodes * STATE_j + i]; const double x_kr = d_states[n_nodes * STATE_x_kr + i]; const double x_ks = d_states[n_nodes * STATE_x_ks + i]; const double x_to_s = d_states[n_nodes * STATE_x_to_s + i]; const double y_to_s = d_states[n_nodes * STATE_y_to_s + i]; const double x_to_f = d_states[n_nodes * STATE_x_to_f + i]; const double y_to_f = d_states[n_nodes * STATE_y_to_f + i]; const double d = d_states[n_nodes * STATE_d + i]; const double f = d_states[n_nodes * STATE_f + i]; const double f_Ca_Bj = d_states[n_nodes * STATE_f_Ca_Bj + i]; const double f_Ca_Bsl = d_states[n_nodes * STATE_f_Ca_Bsl + i]; const double Ry_Rr = d_states[n_nodes * STATE_Ry_Rr + i]; const double Ry_Ro = d_states[n_nodes * STATE_Ry_Ro + i]; const double Ry_Ri = d_states[n_nodes * STATE_Ry_Ri + i]; const double Na_Bj = d_states[n_nodes * STATE_Na_Bj + i]; const double Na_Bsl = d_states[n_nodes * STATE_Na_Bsl + i]; const double Tn_CL = d_states[n_nodes * STATE_Tn_CL + i]; const double Tn_CHc = d_states[n_nodes * STATE_Tn_CHc + i]; const double Tn_CHm = d_states[n_nodes * STATE_Tn_CHm + i]; const double CaM = d_states[n_nodes * STATE_CaM + i]; const double Myo_c = d_states[n_nodes * STATE_Myo_c + i]; const double Myo_m = d_states[n_nodes * STATE_Myo_m + i]; const double SRB = d_states[n_nodes * STATE_SRB + i]; const double SLL_j = d_states[n_nodes * STATE_SLL_j + i]; const double SLL_sl = d_states[n_nodes * STATE_SLL_sl + i]; const double SLH_j = d_states[n_nodes * STATE_SLH_j + i]; const double SLH_sl = d_states[n_nodes * STATE_SLH_sl + i]; const double Csqn_b = d_states[n_nodes * STATE_Csqn_b + i]; const double Ca_sr = d_states[n_nodes * STATE_Ca_sr + i]; const double Na_j = d_states[n_nodes * STATE_Na_j + i]; const double Na_sl = 
d_states[n_nodes * STATE_Na_sl + i]; const double Na_i = d_states[n_nodes * STATE_Na_i + i]; const double K_i = d_states[n_nodes * STATE_K_i + i]; const double Ca_j = d_states[n_nodes * STATE_Ca_j + i]; const double Ca_sl = d_states[n_nodes * STATE_Ca_sl + i]; const double Ca_i = d_states[n_nodes * STATE_Ca_i + i]; const double V_m = d_states[n_nodes * STATE_V_m + i]; // Assign parameters const double Fjunc = d_parameters[n_nodes * PARAM_Fjunc + i]; const double Fjunc_CaL = d_parameters[n_nodes * PARAM_Fjunc_CaL + i]; const double cellLength = d_parameters[n_nodes * PARAM_cellLength + i]; const double cellRadius = d_parameters[n_nodes * PARAM_cellRadius + i]; const double GNa = d_parameters[n_nodes * PARAM_GNa + i]; const double GNaB = d_parameters[n_nodes * PARAM_GNaB + i]; const double IbarNaK = d_parameters[n_nodes * PARAM_IbarNaK + i]; const double KmKo = d_parameters[n_nodes * PARAM_KmKo + i]; const double KmNaip = d_parameters[n_nodes * PARAM_KmNaip + i]; const double GKr = d_parameters[n_nodes * PARAM_GKr + i]; const double GKp = d_parameters[n_nodes * PARAM_GKp + i]; const double GKs = d_parameters[n_nodes * PARAM_GKs + i]; const double pNaK = d_parameters[n_nodes * PARAM_pNaK + i]; const double GK1 = d_parameters[n_nodes * PARAM_GK1 + i]; const double Gto = d_parameters[n_nodes * PARAM_Gto + i]; const double epi = d_parameters[n_nodes * PARAM_epi + i]; const double GClB = d_parameters[n_nodes * PARAM_GClB + i]; const double GClCa = d_parameters[n_nodes * PARAM_GClCa + i]; const double KdClCa = d_parameters[n_nodes * PARAM_KdClCa + i]; const double GCaL = d_parameters[n_nodes * PARAM_GCaL + i]; const double Q10CaL = d_parameters[n_nodes * PARAM_Q10CaL + i]; const double pCa = d_parameters[n_nodes * PARAM_pCa + i]; const double pK = d_parameters[n_nodes * PARAM_pK + i]; const double pNa = d_parameters[n_nodes * PARAM_pNa + i]; const double IbarNCX = d_parameters[n_nodes * PARAM_IbarNCX + i]; const double Kdact = d_parameters[n_nodes * PARAM_Kdact + i]; const 
double KmCai = d_parameters[n_nodes * PARAM_KmCai + i]; const double KmCao = d_parameters[n_nodes * PARAM_KmCao + i]; const double KmNai = d_parameters[n_nodes * PARAM_KmNai + i]; const double KmNao = d_parameters[n_nodes * PARAM_KmNao + i]; const double Q10NCX = d_parameters[n_nodes * PARAM_Q10NCX + i]; const double ksat = d_parameters[n_nodes * PARAM_ksat + i]; const double nu = d_parameters[n_nodes * PARAM_nu + i]; const double IbarSLCaP = d_parameters[n_nodes * PARAM_IbarSLCaP + i]; const double KmPCa = d_parameters[n_nodes * PARAM_KmPCa + i]; const double Q10SLCaP = d_parameters[n_nodes * PARAM_Q10SLCaP + i]; const double GCaB = d_parameters[n_nodes * PARAM_GCaB + i]; const double Kmf = d_parameters[n_nodes * PARAM_Kmf + i]; const double Kmr = d_parameters[n_nodes * PARAM_Kmr + i]; const double MaxSR = d_parameters[n_nodes * PARAM_MaxSR + i]; const double MinSR = d_parameters[n_nodes * PARAM_MinSR + i]; const double Q10SRCaP = d_parameters[n_nodes * PARAM_Q10SRCaP + i]; const double Vmax_SRCaP = d_parameters[n_nodes * PARAM_Vmax_SRCaP + i]; const double ec50SR = d_parameters[n_nodes * PARAM_ec50SR + i]; const double hillSRCaP = d_parameters[n_nodes * PARAM_hillSRCaP + i]; const double kiCa = d_parameters[n_nodes * PARAM_kiCa + i]; const double kim = d_parameters[n_nodes * PARAM_kim + i]; const double koCa = d_parameters[n_nodes * PARAM_koCa + i]; const double kom = d_parameters[n_nodes * PARAM_kom + i]; const double ks = d_parameters[n_nodes * PARAM_ks + i]; const double Bmax_Naj = d_parameters[n_nodes * PARAM_Bmax_Naj + i]; const double Bmax_Nasl = d_parameters[n_nodes * PARAM_Bmax_Nasl + i]; const double koff_na = d_parameters[n_nodes * PARAM_koff_na + i]; const double kon_na = d_parameters[n_nodes * PARAM_kon_na + i]; const double Bmax_CaM = d_parameters[n_nodes * PARAM_Bmax_CaM + i]; const double Bmax_SR = d_parameters[n_nodes * PARAM_Bmax_SR + i]; const double Bmax_TnChigh = d_parameters[n_nodes * PARAM_Bmax_TnChigh + i]; const double Bmax_TnClow = 
d_parameters[n_nodes * PARAM_Bmax_TnClow + i]; const double Bmax_myosin = d_parameters[n_nodes * PARAM_Bmax_myosin + i]; const double koff_cam = d_parameters[n_nodes * PARAM_koff_cam + i]; const double koff_myoca = d_parameters[n_nodes * PARAM_koff_myoca + i]; const double koff_myomg = d_parameters[n_nodes * PARAM_koff_myomg + i]; const double koff_sr = d_parameters[n_nodes * PARAM_koff_sr + i]; const double koff_tnchca = d_parameters[n_nodes * PARAM_koff_tnchca + i]; const double koff_tnchmg = d_parameters[n_nodes * PARAM_koff_tnchmg + i]; const double koff_tncl = d_parameters[n_nodes * PARAM_koff_tncl + i]; const double kon_cam = d_parameters[n_nodes * PARAM_kon_cam + i]; const double kon_myoca = d_parameters[n_nodes * PARAM_kon_myoca + i]; const double kon_myomg = d_parameters[n_nodes * PARAM_kon_myomg + i]; const double kon_sr = d_parameters[n_nodes * PARAM_kon_sr + i]; const double kon_tnchca = d_parameters[n_nodes * PARAM_kon_tnchca + i]; const double kon_tnchmg = d_parameters[n_nodes * PARAM_kon_tnchmg + i]; const double kon_tncl = d_parameters[n_nodes * PARAM_kon_tncl + i]; const double Bmax_SLhighj0 = d_parameters[n_nodes * PARAM_Bmax_SLhighj0 + i]; const double Bmax_SLhighsl0 = d_parameters[n_nodes * PARAM_Bmax_SLhighsl0 + i]; const double Bmax_SLlowj0 = d_parameters[n_nodes * PARAM_Bmax_SLlowj0 + i]; const double Bmax_SLlowsl0 = d_parameters[n_nodes * PARAM_Bmax_SLlowsl0 + i]; const double koff_slh = d_parameters[n_nodes * PARAM_koff_slh + i]; const double koff_sll = d_parameters[n_nodes * PARAM_koff_sll + i]; const double kon_slh = d_parameters[n_nodes * PARAM_kon_slh + i]; const double kon_sll = d_parameters[n_nodes * PARAM_kon_sll + i]; const double Bmax_Csqn0 = d_parameters[n_nodes * PARAM_Bmax_Csqn0 + i]; const double J_ca_juncsl = d_parameters[n_nodes * PARAM_J_ca_juncsl + i]; const double J_ca_slmyo = d_parameters[n_nodes * PARAM_J_ca_slmyo + i]; const double koff_csqn = d_parameters[n_nodes * PARAM_koff_csqn + i]; const double kon_csqn = 
d_parameters[n_nodes * PARAM_kon_csqn + i]; const double J_na_juncsl = d_parameters[n_nodes * PARAM_J_na_juncsl + i]; const double J_na_slmyo = d_parameters[n_nodes * PARAM_J_na_slmyo + i]; const double Nao = d_parameters[n_nodes * PARAM_Nao + i]; const double Ko = d_parameters[n_nodes * PARAM_Ko + i]; const double Cao = d_parameters[n_nodes * PARAM_Cao + i]; const double Cli = d_parameters[n_nodes * PARAM_Cli + i]; const double Clo = d_parameters[n_nodes * PARAM_Clo + i]; const double Mgi = d_parameters[n_nodes * PARAM_Mgi + i]; const double Cmem = d_parameters[n_nodes * PARAM_Cmem + i]; const double Frdy = d_parameters[n_nodes * PARAM_Frdy + i]; const double R = d_parameters[n_nodes * PARAM_R + i]; const double Temp = d_parameters[n_nodes * PARAM_Temp + i]; const double stim_amplitude = d_parameters[n_nodes * PARAM_stim_amplitude + i]; const double stim_duration = d_parameters[n_nodes * PARAM_stim_duration + i]; const double stim_period = d_parameters[n_nodes * PARAM_stim_period + i]; const double stim_start = d_parameters[n_nodes * PARAM_stim_start + i]; // Expressions for the Geometry component const double Vcell = 1.0e-15 * M_PI * cellLength * (cellRadius * cellRadius); const double Vmyo = 0.65 * Vcell; const double Vsr = 0.035 * Vcell; const double Vsl = 0.02 * Vcell; const double Vjunc = 0.000539 * Vcell; const double Fsl = 1. - Fjunc; const double Fsl_CaL = 1. - Fjunc_CaL; // Expressions for the Reversal potentials component const double FoRT = Frdy / (R * Temp); const double ena_junc = log(Nao / Na_j) / FoRT; const double ena_sl = log(Nao / Na_sl) / FoRT; const double ek = log(Ko / K_i) / FoRT; const double eca_junc = log(Cao / Ca_j) / (2. * FoRT); const double eca_sl = log(Cao / Ca_sl) / (2. * FoRT); const double ecl = log(Cli / Clo) / FoRT; const double Qpow = -31. + Temp / 10.; // Expressions for the I_Na component const double mss = 1.0 / ((1. + 0.00184221158116513 * exp(-0.110741971207087 * V_m)) * (1. 
+ 0.00184221158116513 * exp(-0.110741971207087 * V_m))); const double taum = 0.1292 * exp(-((2.94658944658945 + 0.0643500643500644 * V_m) * (2.94658944658945 + 0.0643500643500644 * V_m))) + 0.06487 * exp(-((-0.0943466353677621 + 0.0195618153364632 * V_m) * (-0.0943466353677621 + 0.0195618153364632 * V_m))); const double ah = (V_m >= -40. ? 0. : 4.43126792958051e-7 * exp(-0.147058823529412 * V_m)); const double bh = (V_m >= -40. ? 0.77 / (0.13 + 0.0497581410839387 * exp(-0.0900900900900901 * V_m)) : 310000.0 * exp(0.3485 * V_m) + 2.7 * exp(0.079 * V_m)); const double tauh = 1.0 / (ah + bh); const double hss = 1.0 / ((1. + 15212.5932856544 * exp(0.134589502018843 * V_m)) * (1. + 15212.5932856544 * exp(0.134589502018843 * V_m))); const double aj = (V_m >= -40. ? 0. : (37.78 + V_m) * (-25428.0 * exp(0.2444 * V_m) - 6.948e-6 * exp(-0.04391 * V_m)) / (1. + 50262745825.954 * exp(0.311 * V_m))); const double bj = (V_m >= -40. ? 0.6 * exp(0.057 * V_m) / (1. + 0.0407622039783662 * exp(-0.1 * V_m)) : 0.02424 * exp(-0.01052 * V_m) / (1. + 0.00396086833990426 * exp(-0.1378 * V_m))); const double tauj = 1.0 / (aj + bj); const double jss = 1.0 / ((1. + 15212.5932856544 * exp(0.134589502018843 * V_m)) * (1. + 15212.5932856544 * exp(0.134589502018843 * V_m))); const double dm_dt = (-m + mss) / taum; d_states[n_nodes * STATE_m + i] = dt * dm_dt + m; const double dh_dt = (-h + hss) / tauh; d_states[n_nodes * STATE_h + i] = dt * dh_dt + h; const double dj_dt = (-j + jss) / tauj; d_states[n_nodes * STATE_j + i] = dt * dj_dt + j; const double I_Na_junc = Fjunc * GNa * (m * m * m) * (-ena_junc + V_m) * h * j; const double I_Na_sl = GNa * (m * m * m) * (-ena_sl + V_m) * Fsl * h * j; // Expressions for the I_NaBK component const double I_nabk_junc = Fjunc * GNaB * (-ena_junc + V_m); const double I_nabk_sl = GNaB * (-ena_sl + V_m) * Fsl; // Expressions for the I_NaK component const double sigma = -1. / 7. + exp(0.0148588410104012 * Nao) / 7.; const double fnak = 1.0 / (1. 
+ 0.1245 * exp(-0.1 * FoRT * V_m) + 0.0365 * exp(-FoRT * V_m) * sigma); const double I_nak_junc = Fjunc * IbarNaK * Ko * fnak / ((1. + (((KmNaip) * (KmNaip)) * ((KmNaip) * (KmNaip))) / (((Na_j) * (Na_j)) * ((Na_j) * (Na_j)))) * (KmKo + Ko)); const double I_nak_sl = IbarNaK * Ko * Fsl * fnak / ((1. + (((KmNaip) * (KmNaip)) * ((KmNaip) * (KmNaip))) / (((Na_sl) * (Na_sl)) * ((Na_sl) * (Na_sl)))) * (KmKo + Ko)); const double I_nak = I_nak_junc + I_nak_sl; // Expressions for the I_Kr component const double gkr = 0.430331482911935 * GKr * sqrt(Ko); const double xrss = 1.0 / (1. + exp(-2. - V_m / 5.)); const double tauxr = 230. / (1. + exp(2. + V_m / 20.)) + 3300. / ((1. + exp(-22. / 9. - V_m / 9.)) * (1. + exp(11. / 9. + V_m / 9.))); const double dx_kr_dt = (-x_kr + xrss) / tauxr; d_states[n_nodes * STATE_x_kr + i] = dt * dx_kr_dt + x_kr; const double rkr = 1.0 / (1. + exp(37. / 12. + V_m / 24.)); const double I_kr = (-ek + V_m) * gkr * rkr * x_kr; // Expressions for the I_Kp component const double kp_kp = 1.0 / (1. + 1786.47556537862 * exp(-0.167224080267559 * V_m)); const double I_kp_junc = Fjunc * GKp * (-ek + V_m) * kp_kp; const double I_kp_sl = GKp * (-ek + V_m) * Fsl * kp_kp; const double I_kp = I_kp_junc + I_kp_sl; // Expressions for the I_Ks component const double eks = log((Ko + Nao * pNaK) / (pNaK * Na_i + K_i)) / FoRT; const double gks_junc = GKs; const double gks_sl = GKs; const double xsss = 1.0 / (1. + 0.765928338364649 * exp(-0.0701754385964912 * V_m)); const double tauxs = 990.1 / (1. + 0.841540408868102 * exp(-0.0708215297450425 * V_m)); const double dx_ks_dt = (-x_ks + xsss) / tauxs; d_states[n_nodes * STATE_x_ks + i] = dt * dx_ks_dt + x_ks; const double I_ks_junc = Fjunc * (x_ks * x_ks) * (-eks + V_m) * gks_junc; const double I_ks_sl = (x_ks * x_ks) * (-eks + V_m) * Fsl * gks_sl; const double I_ks = I_ks_junc + I_ks_sl; // Expressions for the I_to component const double GtoSlow = (epi == 1. ? 
0.12 * Gto : 0.2892 * Gto); const double GtoFast = (epi == 1. ? 0.88 * Gto : 0.0108 * Gto); const double xtoss = 1.0 / (1. + exp(19. / 13. - V_m / 13.)); const double ytoss = 1.0 / (1. + 49.4024491055302 * exp(V_m / 5.)); const double tauxtos = 0.5 + 9. / (1. + exp(1. / 5. + V_m / 15.)); const double tauytos = 30. + 800. / (1. + exp(6. + V_m / 10.)); const double dx_to_s_dt = (-x_to_s + xtoss) / tauxtos; d_states[n_nodes * STATE_x_to_s + i] = dt * dx_to_s_dt + x_to_s; const double dy_to_s_dt = (-y_to_s + ytoss) / tauytos; d_states[n_nodes * STATE_y_to_s + i] = dt * dy_to_s_dt + y_to_s; const double I_tos = (-ek + V_m) * GtoSlow * x_to_s * y_to_s; const double tauxtof = 0.5 + 8.5 * exp(-((9. / 10. + V_m / 50.) * (9. / 10. + V_m / 50.))); const double tauytof = 7. + 85. * exp(-((40. + V_m) * (40. + V_m)) / 220.); const double dx_to_f_dt = (-x_to_f + xtoss) / tauxtof; d_states[n_nodes * STATE_x_to_f + i] = dt * dx_to_f_dt + x_to_f; const double dy_to_f_dt = (-y_to_f + ytoss) / tauytof; d_states[n_nodes * STATE_y_to_f + i] = dt * dy_to_f_dt + y_to_f; const double I_tof = (-ek + V_m) * GtoFast * x_to_f * y_to_f; const double I_to = I_tof + I_tos; // Expressions for the I_K1 component const double aki = 1.02 / (1. + 7.35454251046446e-7 * exp(0.2385 * V_m - 0.2385 * ek)); const double bki = (0.762624006506308 * exp(0.08032 * V_m - 0.08032 * ek) + 1.15340563518656e-16 * exp(0.06175 * V_m - 0.06175 * ek)) / (1. + 0.0867722941576933 * exp(0.5143 * ek - 0.5143 * V_m)); const double kiss = aki / (aki + bki); const double I_K1 = 0.430331482911935 * GK1 * sqrt(Ko) * (-ek + V_m) * kiss; // Expressions for the I_ClCa component const double I_ClCa_junc = Fjunc * GClCa * (-ecl + V_m) / (1. + KdClCa / Ca_j); const double I_ClCa_sl = GClCa * (-ecl + V_m) * Fsl / (1. + KdClCa / Ca_sl); const double I_ClCa = I_ClCa_junc + I_ClCa_sl; const double I_Clbk = GClB * (-ecl + V_m); // Expressions for the I_Ca component const double fss = 1.0 / (1. + exp(35. / 9. + V_m / 9.)) + 0.6 / (1. 
+ exp(5. / 2. - V_m / 20.)); const double dss = 1.0 / (1. + exp(-5. / 6. - V_m / 6.)); const double taud = (1. - exp(-5. / 6. - V_m / 6.)) * dss / (0.175 + 0.035 * V_m); const double tauf = 1.0 / (0.02 + 0.0197 * exp(-((0.48865 + 0.0337 * V_m) * (0.48865 + 0.0337 * V_m)))); const double dd_dt = (-d + dss) / taud; d_states[n_nodes * STATE_d + i] = dt * dd_dt + d; const double df_dt = (-f + fss) / tauf; d_states[n_nodes * STATE_f + i] = dt * df_dt + f; const double df_Ca_Bj_dt = -0.0119 * f_Ca_Bj + 1.7 * (1. - f_Ca_Bj) * Ca_j; d_states[n_nodes * STATE_f_Ca_Bj + i] = dt * df_Ca_Bj_dt + f_Ca_Bj; const double df_Ca_Bsl_dt = -0.0119 * f_Ca_Bsl + 1.7 * (1. - f_Ca_Bsl) * Ca_sl; d_states[n_nodes * STATE_f_Ca_Bsl + i] = dt * df_Ca_Bsl_dt + f_Ca_Bsl; const double fcaCaMSL = 0.; const double fcaCaj = 0.; const double ibarca_j = 4. * Frdy * GCaL * pCa * (-0.341 * Cao + 0.341 * Ca_j * exp(2. * FoRT * V_m)) * FoRT * V_m / (-1. + exp(2. * FoRT * V_m)); const double ibarca_sl = 4. * Frdy * GCaL * pCa * (-0.341 * Cao + 0.341 * Ca_sl * exp(2. * FoRT * V_m)) * FoRT * V_m / (-1. + exp(2. * FoRT * V_m)); const double ibark = Frdy * GCaL * pK * (-0.75 * Ko + 0.75 * K_i * exp(FoRT * V_m)) * FoRT * V_m / (-1. + exp(FoRT * V_m)); const double ibarna_j = Frdy * GCaL * pNa * (-0.75 * Nao + 0.75 * Na_j * exp(FoRT * V_m)) * FoRT * V_m / (-1. + exp(FoRT * V_m)); const double ibarna_sl = Frdy * GCaL * pNa * (-0.75 * Nao + 0.75 * Na_sl * exp(FoRT * V_m)) * FoRT * V_m / (-1. + exp(FoRT * V_m)); const double I_Ca_junc = 0.45 * Fjunc_CaL * pow(Q10CaL, Qpow) * (1. + fcaCaj - f_Ca_Bj) * d * f * ibarca_j; const double I_Ca_sl = 0.45 * pow(Q10CaL, Qpow) * (1. + fcaCaMSL - f_Ca_Bsl) * Fsl_CaL * d * f * ibarca_sl; const double I_CaK = 0.45 * pow(Q10CaL, Qpow) * (Fjunc_CaL * (1. + fcaCaj - f_Ca_Bj) + (1. + fcaCaMSL - f_Ca_Bsl) * Fsl_CaL) * d * f * ibark; const double I_CaNa_junc = 0.45 * Fjunc_CaL * pow(Q10CaL, Qpow) * (1. 
+ fcaCaj - f_Ca_Bj) * d * f * ibarna_j; const double I_CaNa_sl = 0.45 * pow(Q10CaL, Qpow) * (1. + fcaCaMSL - f_Ca_Bsl) * Fsl_CaL * d * f * ibarna_sl; // Expressions for the I_NCX component const double Ka_junc = 1.0 / (1. + (Kdact * Kdact) / (Ca_j * Ca_j)); const double Ka_sl = 1.0 / (1. + (Kdact * Kdact) / (Ca_sl * Ca_sl)); const double s1_junc = Cao * (Na_j * Na_j * Na_j) * exp(nu * FoRT * V_m); const double s1_sl = Cao * (Na_sl * Na_sl * Na_sl) * exp(nu * FoRT * V_m); const double s2_junc = (Nao * Nao * Nao) * Ca_j * exp((-1. + nu) * FoRT * V_m); const double s3_junc = Cao * (Na_j * Na_j * Na_j) + KmCao * (Na_j * Na_j * Na_j) + (Nao * Nao * Nao) * Ca_j + KmCai * (Nao * Nao * Nao) * (1. + (Na_j * Na_j * Na_j) / (KmNai * KmNai * KmNai)) + (KmNao * KmNao * KmNao) * (1. + Ca_j / KmCai) * Ca_j; const double s2_sl = (Nao * Nao * Nao) * Ca_sl * exp((-1. + nu) * FoRT * V_m); const double s3_sl = Cao * (Na_sl * Na_sl * Na_sl) + KmCao * (Na_sl * Na_sl * Na_sl) + (Nao * Nao * Nao) * Ca_sl + KmCai * (Nao * Nao * Nao) * (1. + (Na_sl * Na_sl * Na_sl) / (KmNai * KmNai * KmNai)) + (KmNao * KmNao * KmNao) * (1. + Ca_sl / KmCai) * Ca_sl; const double I_ncx_junc = Fjunc * IbarNCX * pow(Q10NCX, Qpow) * (-s2_junc + s1_junc) * Ka_junc / ((1. + ksat * exp((-1. + nu) * FoRT * V_m)) * s3_junc); const double I_ncx_sl = IbarNCX * pow(Q10NCX, Qpow) * (-s2_sl + s1_sl) * Fsl * Ka_sl / ((1. + ksat * exp((-1. + nu) * FoRT * V_m)) * s3_sl); // Expressions for the I_PCa component const double I_pca_junc = Fjunc * IbarSLCaP * pow(Q10SLCaP, Qpow) * pow(Ca_j, 1.6) / (pow(KmPCa, 1.6) + pow(Ca_j, 1.6)); const double I_pca_sl = IbarSLCaP * pow(Q10SLCaP, Qpow) * pow(Ca_sl, 1.6) * Fsl / (pow(KmPCa, 1.6) + pow(Ca_sl, 1.6)); // Expressions for the I_CaBK component const double I_cabk_junc = Fjunc * GCaB * (-eca_junc + V_m); const double I_cabk_sl = GCaB * (-eca_sl + V_m) * Fsl; // Expressions for the SR Fluxes component const double kCaSR = MaxSR - (MaxSR - MinSR) / (1. 
+ pow(ec50SR / Ca_sr, 2.5)); const double koSRCa = koCa / kCaSR; const double kiSRCa = kiCa * kCaSR; const double RI = 1. - Ry_Ri - Ry_Ro - Ry_Rr; const double dRy_Rr_dt = kim * RI + kom * Ry_Ro - (Ca_j * Ca_j) * Ry_Rr * koSRCa - Ca_j * Ry_Rr * kiSRCa; d_states[n_nodes * STATE_Ry_Rr + i] = dt * dRy_Rr_dt + Ry_Rr; const double dRy_Ro_dt = kim * Ry_Ri - kom * Ry_Ro + (Ca_j * Ca_j) * Ry_Rr * koSRCa - Ca_j * Ry_Ro * kiSRCa; d_states[n_nodes * STATE_Ry_Ro + i] = dt * dRy_Ro_dt + Ry_Ro; const double dRy_Ri_dt = -kim * Ry_Ri - kom * Ry_Ri + (Ca_j * Ca_j) * RI * koSRCa + Ca_j * Ry_Ro * kiSRCa; d_states[n_nodes * STATE_Ry_Ri + i] = dt * dRy_Ri_dt + Ry_Ri; const double J_SRCarel = ks * (-Ca_j + Ca_sr) * Ry_Ro; const double J_serca = Vmax_SRCaP * pow(Q10SRCaP, Qpow) * (pow(Ca_i / Kmf, hillSRCaP) - pow(Ca_sr / Kmr, hillSRCaP)) / (1. + pow(Ca_i / Kmf, hillSRCaP) + pow(Ca_sr / Kmr, hillSRCaP)); const double J_SRleak = 5.348e-6 * Ca_sr - 5.348e-6 * Ca_j; // Expressions for the Na Buffers component const double dNa_Bj_dt = -koff_na * Na_Bj + kon_na * (Bmax_Naj - Na_Bj) * Na_j; d_states[n_nodes * STATE_Na_Bj + i] = dt * dNa_Bj_dt + Na_Bj; const double dNa_Bsl_dt = -koff_na * Na_Bsl + kon_na * (Bmax_Nasl - Na_Bsl) * Na_sl; d_states[n_nodes * STATE_Na_Bsl + i] = dt * dNa_Bsl_dt + Na_Bsl; // Expressions for the Cytosolic Ca Buffers component const double dTn_CL_dt = -koff_tncl * Tn_CL + kon_tncl * (Bmax_TnClow - Tn_CL) * Ca_i; d_states[n_nodes * STATE_Tn_CL + i] = dt * dTn_CL_dt + Tn_CL; const double dTn_CHc_dt = -koff_tnchca * Tn_CHc + kon_tnchca * (Bmax_TnChigh - Tn_CHc - Tn_CHm) * Ca_i; d_states[n_nodes * STATE_Tn_CHc + i] = dt * dTn_CHc_dt + Tn_CHc; const double dTn_CHm_dt = -koff_tnchmg * Tn_CHm + Mgi * kon_tnchmg * (Bmax_TnChigh - Tn_CHc - Tn_CHm); d_states[n_nodes * STATE_Tn_CHm + i] = dt * dTn_CHm_dt + Tn_CHm; const double dCaM_dt = -koff_cam * CaM + kon_cam * (Bmax_CaM - CaM) * Ca_i; d_states[n_nodes * STATE_CaM + i] = dt * dCaM_dt + CaM; const double dMyo_c_dt = -koff_myoca 
* Myo_c + kon_myoca * (Bmax_myosin - Myo_c - Myo_m) * Ca_i; d_states[n_nodes * STATE_Myo_c + i] = dt * dMyo_c_dt + Myo_c; const double dMyo_m_dt = -koff_myomg * Myo_m + Mgi * kon_myomg * (Bmax_myosin - Myo_c - Myo_m); d_states[n_nodes * STATE_Myo_m + i] = dt * dMyo_m_dt + Myo_m; const double dSRB_dt = -koff_sr * SRB + kon_sr * (Bmax_SR - SRB) * Ca_i; d_states[n_nodes * STATE_SRB + i] = dt * dSRB_dt + SRB; const double J_CaB_cytosol = -koff_cam * CaM - koff_myoca * Myo_c - koff_myomg * Myo_m - koff_sr * SRB - koff_tnchca * Tn_CHc - koff_tnchmg * Tn_CHm - koff_tncl * Tn_CL + Mgi * kon_myomg * (Bmax_myosin - Myo_c - Myo_m) + Mgi * kon_tnchmg * (Bmax_TnChigh - Tn_CHc - Tn_CHm) + kon_cam * (Bmax_CaM - CaM) * Ca_i + kon_myoca * (Bmax_myosin - Myo_c - Myo_m) * Ca_i + kon_sr * (Bmax_SR - SRB) * Ca_i + kon_tnchca * (Bmax_TnChigh - Tn_CHc - Tn_CHm) * Ca_i + kon_tncl * (Bmax_TnClow - Tn_CL) * Ca_i; // Expressions for the Junctional and SL Ca Buffers component const double Bmax_SLlowsl = Bmax_SLlowsl0 * Vmyo / Vsl; const double Bmax_SLlowj = Bmax_SLlowj0 * Vmyo / Vjunc; const double Bmax_SLhighsl = Bmax_SLhighsl0 * Vmyo / Vsl; const double Bmax_SLhighj = Bmax_SLhighj0 * Vmyo / Vjunc; const double dSLL_j_dt = -koff_sll * SLL_j + kon_sll * (-SLL_j + Bmax_SLlowj) * Ca_j; d_states[n_nodes * STATE_SLL_j + i] = dt * dSLL_j_dt + SLL_j; const double dSLL_sl_dt = -koff_sll * SLL_sl + kon_sll * (-SLL_sl + Bmax_SLlowsl) * Ca_sl; d_states[n_nodes * STATE_SLL_sl + i] = dt * dSLL_sl_dt + SLL_sl; const double dSLH_j_dt = -koff_slh * SLH_j + kon_slh * (-SLH_j + Bmax_SLhighj) * Ca_j; d_states[n_nodes * STATE_SLH_j + i] = dt * dSLH_j_dt + SLH_j; const double dSLH_sl_dt = -koff_slh * SLH_sl + kon_slh * (-SLH_sl + Bmax_SLhighsl) * Ca_sl; d_states[n_nodes * STATE_SLH_sl + i] = dt * dSLH_sl_dt + SLH_sl; const double J_CaB_junction = -koff_slh * SLH_j - koff_sll * SLL_j + kon_slh * (-SLH_j + Bmax_SLhighj) * Ca_j + kon_sll * (-SLL_j + Bmax_SLlowj) * Ca_j; const double J_CaB_sl = -koff_slh * SLH_sl - 
koff_sll * SLL_sl + kon_slh * (-SLH_sl + Bmax_SLhighsl) * Ca_sl + kon_sll * (-SLL_sl + Bmax_SLlowsl) * Ca_sl; // Expressions for the SR Ca Concentrations component const double Bmax_Csqn = Bmax_Csqn0 * Vmyo / Vsr; const double dCsqn_b_dt = -koff_csqn * Csqn_b + kon_csqn * (-Csqn_b + Bmax_Csqn) * Ca_sr; d_states[n_nodes * STATE_Csqn_b + i] = dt * dCsqn_b_dt + Csqn_b; const double dCa_sr_dt = -J_SRCarel + koff_csqn * Csqn_b - kon_csqn * (-Csqn_b + Bmax_Csqn) * Ca_sr - J_SRleak * Vmyo / Vsr + J_serca; d_states[n_nodes * STATE_Ca_sr + i] = dt * dCa_sr_dt + Ca_sr; // Expressions for the Na Concentrations component const double I_Na_tot_junc = 3. * I_nak_junc + 3. * I_ncx_junc + I_CaNa_junc + I_Na_junc + I_nabk_junc; const double I_Na_tot_sl = 3. * I_nak_sl + 3. * I_ncx_sl + I_CaNa_sl + I_Na_sl + I_nabk_sl; const double dNa_j_dt = -dNa_Bj_dt + J_na_juncsl * (-Na_j + Na_sl) / Vjunc - Cmem * I_Na_tot_junc / (Frdy * Vjunc); d_states[n_nodes * STATE_Na_j + i] = dt * dNa_j_dt + Na_j; const double dNa_sl_dt = -dNa_Bsl_dt + J_na_juncsl * (-Na_sl + Na_j) / Vsl + J_na_slmyo * (-Na_sl + Na_i) / Vsl - Cmem * I_Na_tot_sl / (Frdy * Vsl); d_states[n_nodes * STATE_Na_sl + i] = dt * dNa_sl_dt + Na_sl; const double dNa_i_dt = J_na_slmyo * (-Na_i + Na_sl) / Vmyo; d_states[n_nodes * STATE_Na_i + i] = dt * dNa_i_dt + Na_i; // Expressions for the K Concentration component const double I_K_tot = -2. * I_nak + I_CaK + I_K1 + I_kp + I_kr + I_ks + I_to; const double dK_i_dt = 0.; d_states[n_nodes * STATE_K_i + i] = dt * dK_i_dt + K_i; // Expressions for the Ca Concentrations component const double I_Ca_tot_junc = -2. * I_ncx_junc + I_Ca_junc + I_cabk_junc + I_pca_junc; const double I_Ca_tot_sl = -2. * I_ncx_sl + I_Ca_sl + I_cabk_sl + I_pca_sl; const double dCa_j_dt = -J_CaB_junction + J_ca_juncsl * (-Ca_j + Ca_sl) / Vjunc + J_SRCarel * Vsr / Vjunc + J_SRleak * Vmyo / Vjunc - Cmem * I_Ca_tot_junc / (2. 
* Frdy * Vjunc); d_states[n_nodes * STATE_Ca_j + i] = dt * dCa_j_dt + Ca_j; const double dCa_sl_dt = -J_CaB_sl + J_ca_juncsl * (-Ca_sl + Ca_j) / Vsl + J_ca_slmyo * (-Ca_sl + Ca_i) / Vsl - Cmem * I_Ca_tot_sl / (2. * Frdy * Vsl); d_states[n_nodes * STATE_Ca_sl + i] = dt * dCa_sl_dt + Ca_sl; const double dCa_i_dt = -J_CaB_cytosol + J_ca_slmyo * (-Ca_i + Ca_sl) / Vmyo - J_serca * Vsr / Vmyo; d_states[n_nodes * STATE_Ca_i + i] = dt * dCa_i_dt + Ca_i; // Expressions for the Membrane potential component const double i_Stim = (t - stim_period * floor(t / stim_period) <= stim_duration + stim_start && t - stim_period * floor(t / stim_period) >= stim_start ? -stim_amplitude : 0.); const double I_Na_tot = I_Na_tot_junc + I_Na_tot_sl; const double I_Cl_tot = I_ClCa + I_Clbk; const double I_Ca_tot = I_Ca_tot_junc + I_Ca_tot_sl; const double I_tot = I_Ca_tot + I_Cl_tot + I_K_tot + I_Na_tot; const double dV_m_dt = -I_tot - i_Stim; d_states[n_nodes * STATE_V_m + i] = dt * dV_m_dt + V_m; } } // Compute a forward step using the rush larsen algorithm to the grandi ODE void GRL1(double *d_states, const double t, const double dt, const double *d_parameters, const uint64_t n_nodes) { #pragma omp parallel for for (uint64_t i = 0; i < n_nodes; i++) { // Assign states const double m = d_states[n_nodes * STATE_m + i]; const double h = d_states[n_nodes * STATE_h + i]; const double j = d_states[n_nodes * STATE_j + i]; const double x_kr = d_states[n_nodes * STATE_x_kr + i]; const double x_ks = d_states[n_nodes * STATE_x_ks + i]; const double x_to_s = d_states[n_nodes * STATE_x_to_s + i]; const double y_to_s = d_states[n_nodes * STATE_y_to_s + i]; const double x_to_f = d_states[n_nodes * STATE_x_to_f + i]; const double y_to_f = d_states[n_nodes * STATE_y_to_f + i]; const double d = d_states[n_nodes * STATE_d + i]; const double f = d_states[n_nodes * STATE_f + i]; const double f_Ca_Bj = d_states[n_nodes * STATE_f_Ca_Bj + i]; const double f_Ca_Bsl = d_states[n_nodes * STATE_f_Ca_Bsl + i]; const 
double Ry_Rr = d_states[n_nodes * STATE_Ry_Rr + i]; const double Ry_Ro = d_states[n_nodes * STATE_Ry_Ro + i]; const double Ry_Ri = d_states[n_nodes * STATE_Ry_Ri + i]; const double Na_Bj = d_states[n_nodes * STATE_Na_Bj + i]; const double Na_Bsl = d_states[n_nodes * STATE_Na_Bsl + i]; const double Tn_CL = d_states[n_nodes * STATE_Tn_CL + i]; const double Tn_CHc = d_states[n_nodes * STATE_Tn_CHc + i]; const double Tn_CHm = d_states[n_nodes * STATE_Tn_CHm + i]; const double CaM = d_states[n_nodes * STATE_CaM + i]; const double Myo_c = d_states[n_nodes * STATE_Myo_c + i]; const double Myo_m = d_states[n_nodes * STATE_Myo_m + i]; const double SRB = d_states[n_nodes * STATE_SRB + i]; const double SLL_j = d_states[n_nodes * STATE_SLL_j + i]; const double SLL_sl = d_states[n_nodes * STATE_SLL_sl + i]; const double SLH_j = d_states[n_nodes * STATE_SLH_j + i]; const double SLH_sl = d_states[n_nodes * STATE_SLH_sl + i]; const double Csqn_b = d_states[n_nodes * STATE_Csqn_b + i]; const double Ca_sr = d_states[n_nodes * STATE_Ca_sr + i]; const double Na_j = d_states[n_nodes * STATE_Na_j + i]; const double Na_sl = d_states[n_nodes * STATE_Na_sl + i]; const double Na_i = d_states[n_nodes * STATE_Na_i + i]; const double K_i = d_states[n_nodes * STATE_K_i + i]; const double Ca_j = d_states[n_nodes * STATE_Ca_j + i]; const double Ca_sl = d_states[n_nodes * STATE_Ca_sl + i]; const double Ca_i = d_states[n_nodes * STATE_Ca_i + i]; const double V_m = d_states[n_nodes * STATE_V_m + i]; // Assign parameters const double Fjunc = d_parameters[n_nodes * PARAM_Fjunc + i]; const double Fjunc_CaL = d_parameters[n_nodes * PARAM_Fjunc_CaL + i]; const double cellLength = d_parameters[n_nodes * PARAM_cellLength + i]; const double cellRadius = d_parameters[n_nodes * PARAM_cellRadius + i]; const double GNa = d_parameters[n_nodes * PARAM_GNa + i]; const double GNaB = d_parameters[n_nodes * PARAM_GNaB + i]; const double IbarNaK = d_parameters[n_nodes * PARAM_IbarNaK + i]; const double KmKo = 
d_parameters[n_nodes * PARAM_KmKo + i]; const double KmNaip = d_parameters[n_nodes * PARAM_KmNaip + i]; const double GKr = d_parameters[n_nodes * PARAM_GKr + i]; const double GKp = d_parameters[n_nodes * PARAM_GKp + i]; const double GKs = d_parameters[n_nodes * PARAM_GKs + i]; const double pNaK = d_parameters[n_nodes * PARAM_pNaK + i]; const double GK1 = d_parameters[n_nodes * PARAM_GK1 + i]; const double Gto = d_parameters[n_nodes * PARAM_Gto + i]; const double epi = d_parameters[n_nodes * PARAM_epi + i]; const double GClB = d_parameters[n_nodes * PARAM_GClB + i]; const double GClCa = d_parameters[n_nodes * PARAM_GClCa + i]; const double KdClCa = d_parameters[n_nodes * PARAM_KdClCa + i]; const double GCaL = d_parameters[n_nodes * PARAM_GCaL + i]; const double Q10CaL = d_parameters[n_nodes * PARAM_Q10CaL + i]; const double pCa = d_parameters[n_nodes * PARAM_pCa + i]; const double pK = d_parameters[n_nodes * PARAM_pK + i]; const double pNa = d_parameters[n_nodes * PARAM_pNa + i]; const double IbarNCX = d_parameters[n_nodes * PARAM_IbarNCX + i]; const double Kdact = d_parameters[n_nodes * PARAM_Kdact + i]; const double KmCai = d_parameters[n_nodes * PARAM_KmCai + i]; const double KmCao = d_parameters[n_nodes * PARAM_KmCao + i]; const double KmNai = d_parameters[n_nodes * PARAM_KmNai + i]; const double KmNao = d_parameters[n_nodes * PARAM_KmNao + i]; const double Q10NCX = d_parameters[n_nodes * PARAM_Q10NCX + i]; const double ksat = d_parameters[n_nodes * PARAM_ksat + i]; const double nu = d_parameters[n_nodes * PARAM_nu + i]; const double IbarSLCaP = d_parameters[n_nodes * PARAM_IbarSLCaP + i]; const double KmPCa = d_parameters[n_nodes * PARAM_KmPCa + i]; const double Q10SLCaP = d_parameters[n_nodes * PARAM_Q10SLCaP + i]; const double GCaB = d_parameters[n_nodes * PARAM_GCaB + i]; const double Kmf = d_parameters[n_nodes * PARAM_Kmf + i]; const double Kmr = d_parameters[n_nodes * PARAM_Kmr + i]; const double MaxSR = d_parameters[n_nodes * PARAM_MaxSR + i]; const 
double MinSR = d_parameters[n_nodes * PARAM_MinSR + i]; const double Q10SRCaP = d_parameters[n_nodes * PARAM_Q10SRCaP + i]; const double Vmax_SRCaP = d_parameters[n_nodes * PARAM_Vmax_SRCaP + i]; const double ec50SR = d_parameters[n_nodes * PARAM_ec50SR + i]; const double hillSRCaP = d_parameters[n_nodes * PARAM_hillSRCaP + i]; const double kiCa = d_parameters[n_nodes * PARAM_kiCa + i]; const double kim = d_parameters[n_nodes * PARAM_kim + i]; const double koCa = d_parameters[n_nodes * PARAM_koCa + i]; const double kom = d_parameters[n_nodes * PARAM_kom + i]; const double ks = d_parameters[n_nodes * PARAM_ks + i]; const double Bmax_Naj = d_parameters[n_nodes * PARAM_Bmax_Naj + i]; const double Bmax_Nasl = d_parameters[n_nodes * PARAM_Bmax_Nasl + i]; const double koff_na = d_parameters[n_nodes * PARAM_koff_na + i]; const double kon_na = d_parameters[n_nodes * PARAM_kon_na + i]; const double Bmax_CaM = d_parameters[n_nodes * PARAM_Bmax_CaM + i]; const double Bmax_SR = d_parameters[n_nodes * PARAM_Bmax_SR + i]; const double Bmax_TnChigh = d_parameters[n_nodes * PARAM_Bmax_TnChigh + i]; const double Bmax_TnClow = d_parameters[n_nodes * PARAM_Bmax_TnClow + i]; const double Bmax_myosin = d_parameters[n_nodes * PARAM_Bmax_myosin + i]; const double koff_cam = d_parameters[n_nodes * PARAM_koff_cam + i]; const double koff_myoca = d_parameters[n_nodes * PARAM_koff_myoca + i]; const double koff_myomg = d_parameters[n_nodes * PARAM_koff_myomg + i]; const double koff_sr = d_parameters[n_nodes * PARAM_koff_sr + i]; const double koff_tnchca = d_parameters[n_nodes * PARAM_koff_tnchca + i]; const double koff_tnchmg = d_parameters[n_nodes * PARAM_koff_tnchmg + i]; const double koff_tncl = d_parameters[n_nodes * PARAM_koff_tncl + i]; const double kon_cam = d_parameters[n_nodes * PARAM_kon_cam + i]; const double kon_myoca = d_parameters[n_nodes * PARAM_kon_myoca + i]; const double kon_myomg = d_parameters[n_nodes * PARAM_kon_myomg + i]; const double kon_sr = d_parameters[n_nodes * 
PARAM_kon_sr + i]; const double kon_tnchca = d_parameters[n_nodes * PARAM_kon_tnchca + i]; const double kon_tnchmg = d_parameters[n_nodes * PARAM_kon_tnchmg + i]; const double kon_tncl = d_parameters[n_nodes * PARAM_kon_tncl + i]; const double Bmax_SLhighj0 = d_parameters[n_nodes * PARAM_Bmax_SLhighj0 + i]; const double Bmax_SLhighsl0 = d_parameters[n_nodes * PARAM_Bmax_SLhighsl0 + i]; const double Bmax_SLlowj0 = d_parameters[n_nodes * PARAM_Bmax_SLlowj0 + i]; const double Bmax_SLlowsl0 = d_parameters[n_nodes * PARAM_Bmax_SLlowsl0 + i]; const double koff_slh = d_parameters[n_nodes * PARAM_koff_slh + i]; const double koff_sll = d_parameters[n_nodes * PARAM_koff_sll + i]; const double kon_slh = d_parameters[n_nodes * PARAM_kon_slh + i]; const double kon_sll = d_parameters[n_nodes * PARAM_kon_sll + i]; const double Bmax_Csqn0 = d_parameters[n_nodes * PARAM_Bmax_Csqn0 + i]; const double J_ca_juncsl = d_parameters[n_nodes * PARAM_J_ca_juncsl + i]; const double J_ca_slmyo = d_parameters[n_nodes * PARAM_J_ca_slmyo + i]; const double koff_csqn = d_parameters[n_nodes * PARAM_koff_csqn + i]; const double kon_csqn = d_parameters[n_nodes * PARAM_kon_csqn + i]; const double J_na_juncsl = d_parameters[n_nodes * PARAM_J_na_juncsl + i]; const double J_na_slmyo = d_parameters[n_nodes * PARAM_J_na_slmyo + i]; const double Nao = d_parameters[n_nodes * PARAM_Nao + i]; const double Ko = d_parameters[n_nodes * PARAM_Ko + i]; const double Cao = d_parameters[n_nodes * PARAM_Cao + i]; const double Cli = d_parameters[n_nodes * PARAM_Cli + i]; const double Clo = d_parameters[n_nodes * PARAM_Clo + i]; const double Mgi = d_parameters[n_nodes * PARAM_Mgi + i]; const double Cmem = d_parameters[n_nodes * PARAM_Cmem + i]; const double Frdy = d_parameters[n_nodes * PARAM_Frdy + i]; const double R = d_parameters[n_nodes * PARAM_R + i]; const double Temp = d_parameters[n_nodes * PARAM_Temp + i]; const double stim_amplitude = d_parameters[n_nodes * PARAM_stim_amplitude + i]; const double stim_duration 
= d_parameters[n_nodes * PARAM_stim_duration + i]; const double stim_period = d_parameters[n_nodes * PARAM_stim_period + i]; const double stim_start = d_parameters[n_nodes * PARAM_stim_start + i]; // Expressions for the Geometry component const double Vcell = 1.0e-15 * M_PI * cellLength * (cellRadius * cellRadius); const double Vmyo = 0.65 * Vcell; const double Vsr = 0.035 * Vcell; const double Vsl = 0.02 * Vcell; const double Vjunc = 0.000539 * Vcell; const double Fsl = 1. - Fjunc; const double Fsl_CaL = 1. - Fjunc_CaL; // Expressions for the Reversal potentials component const double FoRT = Frdy / (R * Temp); const double ena_junc = log(Nao / Na_j) / FoRT; const double ena_sl = log(Nao / Na_sl) / FoRT; const double ek = log(Ko / K_i) / FoRT; const double eca_junc = log(Cao / Ca_j) / (2. * FoRT); const double eca_sl = log(Cao / Ca_sl) / (2. * FoRT); const double ecl = log(Cli / Clo) / FoRT; const double Qpow = -31. + Temp / 10.; // Expressions for the I_Na component const double mss = 1.0 / ((1. + 0.00184221158116513 * exp(-0.110741971207087 * V_m)) * (1. + 0.00184221158116513 * exp(-0.110741971207087 * V_m))); const double taum = 0.1292 * exp(-((2.94658944658945 + 0.0643500643500644 * V_m) * (2.94658944658945 + 0.0643500643500644 * V_m))) + 0.06487 * exp(-((-0.0943466353677621 + 0.0195618153364632 * V_m) * (-0.0943466353677621 + 0.0195618153364632 * V_m))); const double ah = (V_m >= -40. ? 0. : 4.43126792958051e-7 * exp(-0.147058823529412 * V_m)); const double bh = (V_m >= -40. ? 0.77 / (0.13 + 0.0497581410839387 * exp(-0.0900900900900901 * V_m)) : 310000.0 * exp(0.3485 * V_m) + 2.7 * exp(0.079 * V_m)); const double tauh = 1.0 / (ah + bh); const double hss = 1.0 / ((1. + 15212.5932856544 * exp(0.134589502018843 * V_m)) * (1. + 15212.5932856544 * exp(0.134589502018843 * V_m))); const double aj = (V_m >= -40. ? 0. : (37.78 + V_m) * (-25428.0 * exp(0.2444 * V_m) - 6.948e-6 * exp(-0.04391 * V_m)) / (1. 
+ 50262745825.954 * exp(0.311 * V_m))); const double bj = (V_m >= -40. ? 0.6 * exp(0.057 * V_m) / (1. + 0.0407622039783662 * exp(-0.1 * V_m)) : 0.02424 * exp(-0.01052 * V_m) / (1. + 0.00396086833990426 * exp(-0.1378 * V_m))); const double tauj = 1.0 / (aj + bj); const double jss = 1.0 / ((1. + 15212.5932856544 * exp(0.134589502018843 * V_m)) * (1. + 15212.5932856544 * exp(0.134589502018843 * V_m))); const double dm_dt = (-m + mss) / taum; const double dm_dt_linearized = -1. / taum; d_states[n_nodes * STATE_m + i] = (fabs(dm_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dm_dt_linearized)) * dm_dt / dm_dt_linearized : dt * dm_dt) + m; const double dh_dt = (-h + hss) / tauh; const double dh_dt_linearized = -1. / tauh; d_states[n_nodes * STATE_h + i] = (fabs(dh_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dh_dt_linearized)) * dh_dt / dh_dt_linearized : dt * dh_dt) + h; const double dj_dt = (-j + jss) / tauj; const double dj_dt_linearized = -1. / tauj; d_states[n_nodes * STATE_j + i] = (fabs(dj_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dj_dt_linearized)) * dj_dt / dj_dt_linearized : dt * dj_dt) + j; const double I_Na_junc = Fjunc * GNa * (m * m * m) * (-ena_junc + V_m) * h * j; const double I_Na_sl = GNa * (m * m * m) * (-ena_sl + V_m) * Fsl * h * j; // Expressions for the I_NaBK component const double I_nabk_junc = Fjunc * GNaB * (-ena_junc + V_m); const double I_nabk_sl = GNaB * (-ena_sl + V_m) * Fsl; // Expressions for the I_NaK component const double sigma = -1. / 7. + exp(0.0148588410104012 * Nao) / 7.; const double fnak = 1.0 / (1. + 0.1245 * exp(-0.1 * FoRT * V_m) + 0.0365 * exp(-FoRT * V_m) * sigma); const double I_nak_junc = Fjunc * IbarNaK * Ko * fnak / ((1. + (((KmNaip) * (KmNaip)) * ((KmNaip) * (KmNaip))) / (((Na_j) * (Na_j)) * ((Na_j) * (Na_j)))) * (KmKo + Ko)); const double I_nak_sl = IbarNaK * Ko * Fsl * fnak / ((1. 
+ (((KmNaip) * (KmNaip)) * ((KmNaip) * (KmNaip))) / (((Na_sl) * (Na_sl)) * ((Na_sl) * (Na_sl)))) * (KmKo + Ko)); const double I_nak = I_nak_junc + I_nak_sl; // Expressions for the I_Kr component const double gkr = 0.430331482911935 * GKr * sqrt(Ko); const double xrss = 1.0 / (1. + exp(-2. - V_m / 5.)); const double tauxr = 230. / (1. + exp(2. + V_m / 20.)) + 3300. / ((1. + exp(-22. / 9. - V_m / 9.)) * (1. + exp(11. / 9. + V_m / 9.))); const double dx_kr_dt = (-x_kr + xrss) / tauxr; const double dx_kr_dt_linearized = -1. / tauxr; d_states[n_nodes * STATE_x_kr + i] = (fabs(dx_kr_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dx_kr_dt_linearized)) * dx_kr_dt / dx_kr_dt_linearized : dt * dx_kr_dt) + x_kr; const double rkr = 1.0 / (1. + exp(37. / 12. + V_m / 24.)); const double I_kr = (-ek + V_m) * gkr * rkr * x_kr; // Expressions for the I_Kp component const double kp_kp = 1.0 / (1. + 1786.47556537862 * exp(-0.167224080267559 * V_m)); const double I_kp_junc = Fjunc * GKp * (-ek + V_m) * kp_kp; const double I_kp_sl = GKp * (-ek + V_m) * Fsl * kp_kp; const double I_kp = I_kp_junc + I_kp_sl; // Expressions for the I_Ks component const double eks = log((Ko + Nao * pNaK) / (pNaK * Na_i + K_i)) / FoRT; const double gks_junc = GKs; const double gks_sl = GKs; const double xsss = 1.0 / (1. + 0.765928338364649 * exp(-0.0701754385964912 * V_m)); const double tauxs = 990.1 / (1. + 0.841540408868102 * exp(-0.0708215297450425 * V_m)); const double dx_ks_dt = (-x_ks + xsss) / tauxs; const double dx_ks_dt_linearized = -1. / tauxs; d_states[n_nodes * STATE_x_ks + i] = (fabs(dx_ks_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dx_ks_dt_linearized)) * dx_ks_dt / dx_ks_dt_linearized : dt * dx_ks_dt) + x_ks; const double I_ks_junc = Fjunc * (x_ks * x_ks) * (-eks + V_m) * gks_junc; const double I_ks_sl = (x_ks * x_ks) * (-eks + V_m) * Fsl * gks_sl; const double I_ks = I_ks_junc + I_ks_sl; // Expressions for the I_to component const double GtoSlow = (epi == 1. ? 
0.12 * Gto : 0.2892 * Gto); const double GtoFast = (epi == 1. ? 0.88 * Gto : 0.0108 * Gto); const double xtoss = 1.0 / (1. + exp(19. / 13. - V_m / 13.)); const double ytoss = 1.0 / (1. + 49.4024491055302 * exp(V_m / 5.)); const double tauxtos = 0.5 + 9. / (1. + exp(1. / 5. + V_m / 15.)); const double tauytos = 30. + 800. / (1. + exp(6. + V_m / 10.)); const double dx_to_s_dt = (-x_to_s + xtoss) / tauxtos; const double dx_to_s_dt_linearized = -1. / tauxtos; d_states[n_nodes * STATE_x_to_s + i] = (fabs(dx_to_s_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dx_to_s_dt_linearized)) * dx_to_s_dt / dx_to_s_dt_linearized : dt * dx_to_s_dt) + x_to_s; const double dy_to_s_dt = (-y_to_s + ytoss) / tauytos; const double dy_to_s_dt_linearized = -1. / tauytos; d_states[n_nodes * STATE_y_to_s + i] = (fabs(dy_to_s_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dy_to_s_dt_linearized)) * dy_to_s_dt / dy_to_s_dt_linearized : dt * dy_to_s_dt) + y_to_s; const double I_tos = (-ek + V_m) * GtoSlow * x_to_s * y_to_s; const double tauxtof = 0.5 + 8.5 * exp(-((9. / 10. + V_m / 50.) * (9. / 10. + V_m / 50.))); const double tauytof = 7. + 85. * exp(-((40. + V_m) * (40. + V_m)) / 220.); const double dx_to_f_dt = (-x_to_f + xtoss) / tauxtof; const double dx_to_f_dt_linearized = -1. / tauxtof; d_states[n_nodes * STATE_x_to_f + i] = (fabs(dx_to_f_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dx_to_f_dt_linearized)) * dx_to_f_dt / dx_to_f_dt_linearized : dt * dx_to_f_dt) + x_to_f; const double dy_to_f_dt = (-y_to_f + ytoss) / tauytof; const double dy_to_f_dt_linearized = -1. / tauytof; d_states[n_nodes * STATE_y_to_f + i] = (fabs(dy_to_f_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dy_to_f_dt_linearized)) * dy_to_f_dt / dy_to_f_dt_linearized : dt * dy_to_f_dt) + y_to_f; const double I_tof = (-ek + V_m) * GtoFast * x_to_f * y_to_f; const double I_to = I_tof + I_tos; // Expressions for the I_K1 component const double aki = 1.02 / (1. 
+ 7.35454251046446e-7 * exp(0.2385 * V_m - 0.2385 * ek)); const double bki = (0.762624006506308 * exp(0.08032 * V_m - 0.08032 * ek) + 1.15340563518656e-16 * exp(0.06175 * V_m - 0.06175 * ek)) / (1. + 0.0867722941576933 * exp(0.5143 * ek - 0.5143 * V_m)); const double kiss = aki / (aki + bki); const double I_K1 = 0.430331482911935 * GK1 * sqrt(Ko) * (-ek + V_m) * kiss; // Expressions for the I_ClCa component const double I_ClCa_junc = Fjunc * GClCa * (-ecl + V_m) / (1. + KdClCa / Ca_j); const double I_ClCa_sl = GClCa * (-ecl + V_m) * Fsl / (1. + KdClCa / Ca_sl); const double I_ClCa = I_ClCa_junc + I_ClCa_sl; const double I_Clbk = GClB * (-ecl + V_m); // Expressions for the I_Ca component const double fss = 1.0 / (1. + exp(35. / 9. + V_m / 9.)) + 0.6 / (1. + exp(5. / 2. - V_m / 20.)); const double dss = 1.0 / (1. + exp(-5. / 6. - V_m / 6.)); const double taud = (1. - exp(-5. / 6. - V_m / 6.)) * dss / (0.175 + 0.035 * V_m); const double tauf = 1.0 / (0.02 + 0.0197 * exp(-((0.48865 + 0.0337 * V_m) * (0.48865 + 0.0337 * V_m)))); const double dd_dt = (-d + dss) / taud; const double dd_dt_linearized = -1. / taud; d_states[n_nodes * STATE_d + i] = (fabs(dd_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dd_dt_linearized)) * dd_dt / dd_dt_linearized : dt * dd_dt) + d; const double df_dt = (-f + fss) / tauf; const double df_dt_linearized = -1. / tauf; d_states[n_nodes * STATE_f + i] = (fabs(df_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * df_dt_linearized)) * df_dt / df_dt_linearized : dt * df_dt) + f; const double df_Ca_Bj_dt = -0.0119 * f_Ca_Bj + 1.7 * (1. - f_Ca_Bj) * Ca_j; const double df_Ca_Bj_dt_linearized = -0.0119 - 1.7 * Ca_j; d_states[n_nodes * STATE_f_Ca_Bj + i] = (fabs(df_Ca_Bj_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * df_Ca_Bj_dt_linearized)) * df_Ca_Bj_dt / df_Ca_Bj_dt_linearized : dt * df_Ca_Bj_dt) + f_Ca_Bj; const double df_Ca_Bsl_dt = -0.0119 * f_Ca_Bsl + 1.7 * (1. 
- f_Ca_Bsl) * Ca_sl; const double df_Ca_Bsl_dt_linearized = -0.0119 - 1.7 * Ca_sl; d_states[n_nodes * STATE_f_Ca_Bsl + i] = (fabs(df_Ca_Bsl_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * df_Ca_Bsl_dt_linearized)) * df_Ca_Bsl_dt / df_Ca_Bsl_dt_linearized : dt * df_Ca_Bsl_dt) + f_Ca_Bsl; const double fcaCaMSL = 0.; const double fcaCaj = 0.; const double ibarca_j = 4. * Frdy * GCaL * pCa * (-0.341 * Cao + 0.341 * Ca_j * exp(2. * FoRT * V_m)) * FoRT * V_m / (-1. + exp(2. * FoRT * V_m)); const double ibarca_sl = 4. * Frdy * GCaL * pCa * (-0.341 * Cao + 0.341 * Ca_sl * exp(2. * FoRT * V_m)) * FoRT * V_m / (-1. + exp(2. * FoRT * V_m)); const double ibark = Frdy * GCaL * pK * (-0.75 * Ko + 0.75 * K_i * exp(FoRT * V_m)) * FoRT * V_m / (-1. + exp(FoRT * V_m)); const double ibarna_j = Frdy * GCaL * pNa * (-0.75 * Nao + 0.75 * Na_j * exp(FoRT * V_m)) * FoRT * V_m / (-1. + exp(FoRT * V_m)); const double ibarna_sl = Frdy * GCaL * pNa * (-0.75 * Nao + 0.75 * Na_sl * exp(FoRT * V_m)) * FoRT * V_m / (-1. + exp(FoRT * V_m)); const double I_Ca_junc = 0.45 * Fjunc_CaL * pow(Q10CaL, Qpow) * (1. + fcaCaj - f_Ca_Bj) * d * f * ibarca_j; const double I_Ca_sl = 0.45 * pow(Q10CaL, Qpow) * (1. + fcaCaMSL - f_Ca_Bsl) * Fsl_CaL * d * f * ibarca_sl; const double I_CaK = 0.45 * pow(Q10CaL, Qpow) * (Fjunc_CaL * (1. + fcaCaj - f_Ca_Bj) + (1. + fcaCaMSL - f_Ca_Bsl) * Fsl_CaL) * d * f * ibark; const double I_CaNa_junc = 0.45 * Fjunc_CaL * pow(Q10CaL, Qpow) * (1. + fcaCaj - f_Ca_Bj) * d * f * ibarna_j; const double I_CaNa_sl = 0.45 * pow(Q10CaL, Qpow) * (1. + fcaCaMSL - f_Ca_Bsl) * Fsl_CaL * d * f * ibarna_sl; // Expressions for the I_NCX component const double Ka_junc = 1.0 / (1. + (Kdact * Kdact) / (Ca_j * Ca_j)); const double Ka_sl = 1.0 / (1. + (Kdact * Kdact) / (Ca_sl * Ca_sl)); const double s1_junc = Cao * (Na_j * Na_j * Na_j) * exp(nu * FoRT * V_m); const double s1_sl = Cao * (Na_sl * Na_sl * Na_sl) * exp(nu * FoRT * V_m); const double s2_junc = (Nao * Nao * Nao) * Ca_j * exp((-1. 
+ nu) * FoRT * V_m); const double s3_junc = Cao * (Na_j * Na_j * Na_j) + KmCao * (Na_j * Na_j * Na_j) + (Nao * Nao * Nao) * Ca_j + KmCai * (Nao * Nao * Nao) * (1. + (Na_j * Na_j * Na_j) / (KmNai * KmNai * KmNai)) + (KmNao * KmNao * KmNao) * (1. + Ca_j / KmCai) * Ca_j; const double s2_sl = (Nao * Nao * Nao) * Ca_sl * exp((-1. + nu) * FoRT * V_m); const double s3_sl = Cao * (Na_sl * Na_sl * Na_sl) + KmCao * (Na_sl * Na_sl * Na_sl) + (Nao * Nao * Nao) * Ca_sl + KmCai * (Nao * Nao * Nao) * (1. + (Na_sl * Na_sl * Na_sl) / (KmNai * KmNai * KmNai)) + (KmNao * KmNao * KmNao) * (1. + Ca_sl / KmCai) * Ca_sl; const double I_ncx_junc = Fjunc * IbarNCX * pow(Q10NCX, Qpow) * (-s2_junc + s1_junc) * Ka_junc / ((1. + ksat * exp((-1. + nu) * FoRT * V_m)) * s3_junc); const double I_ncx_sl = IbarNCX * pow(Q10NCX, Qpow) * (-s2_sl + s1_sl) * Fsl * Ka_sl / ((1. + ksat * exp((-1. + nu) * FoRT * V_m)) * s3_sl); // Expressions for the I_PCa component const double I_pca_junc = Fjunc * IbarSLCaP * pow(Q10SLCaP, Qpow) * pow(Ca_j, 1.6) / (pow(KmPCa, 1.6) + pow(Ca_j, 1.6)); const double I_pca_sl = IbarSLCaP * pow(Q10SLCaP, Qpow) * pow(Ca_sl, 1.6) * Fsl / (pow(KmPCa, 1.6) + pow(Ca_sl, 1.6)); // Expressions for the I_CaBK component const double I_cabk_junc = Fjunc * GCaB * (-eca_junc + V_m); const double I_cabk_sl = GCaB * (-eca_sl + V_m) * Fsl; // Expressions for the SR Fluxes component const double kCaSR = MaxSR - (MaxSR - MinSR) / (1. + pow(ec50SR / Ca_sr, 2.5)); const double koSRCa = koCa / kCaSR; const double kiSRCa = kiCa * kCaSR; const double RI = 1. - Ry_Ri - Ry_Ro - Ry_Rr; const double dRy_Rr_dt = kim * RI + kom * Ry_Ro - (Ca_j * Ca_j) * Ry_Rr * koSRCa - Ca_j * Ry_Rr * kiSRCa; const double dRy_Rr_dt_linearized = -kim - (Ca_j * Ca_j) * koSRCa - Ca_j * kiSRCa; d_states[n_nodes * STATE_Ry_Rr + i] = (fabs(dRy_Rr_dt_linearized) > 1.0e-8 ? 
(-1.0 + exp(dt * dRy_Rr_dt_linearized)) * dRy_Rr_dt / dRy_Rr_dt_linearized : dt * dRy_Rr_dt) + Ry_Rr; const double dRy_Ro_dt = kim * Ry_Ri - kom * Ry_Ro + (Ca_j * Ca_j) * Ry_Rr * koSRCa - Ca_j * Ry_Ro * kiSRCa; const double dRy_Ro_dt_linearized = -kom - Ca_j * kiSRCa; d_states[n_nodes * STATE_Ry_Ro + i] = (fabs(dRy_Ro_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dRy_Ro_dt_linearized)) * dRy_Ro_dt / dRy_Ro_dt_linearized : dt * dRy_Ro_dt) + Ry_Ro; const double dRy_Ri_dt = -kim * Ry_Ri - kom * Ry_Ri + (Ca_j * Ca_j) * RI * koSRCa + Ca_j * Ry_Ro * kiSRCa; const double dRy_Ri_dt_linearized = -kim - kom - (Ca_j * Ca_j) * koSRCa; d_states[n_nodes * STATE_Ry_Ri + i] = (fabs(dRy_Ri_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dRy_Ri_dt_linearized)) * dRy_Ri_dt / dRy_Ri_dt_linearized : dt * dRy_Ri_dt) + Ry_Ri; const double J_SRCarel = ks * (-Ca_j + Ca_sr) * Ry_Ro; const double J_serca = Vmax_SRCaP * pow(Q10SRCaP, Qpow) * (pow(Ca_i / Kmf, hillSRCaP) - pow(Ca_sr / Kmr, hillSRCaP)) / (1. + pow(Ca_i / Kmf, hillSRCaP) + pow(Ca_sr / Kmr, hillSRCaP)); const double J_SRleak = 5.348e-6 * Ca_sr - 5.348e-6 * Ca_j; // Expressions for the Na Buffers component const double dNa_Bj_dt = -koff_na * Na_Bj + kon_na * (Bmax_Naj - Na_Bj) * Na_j; const double dNa_Bj_dt_linearized = -koff_na - kon_na * Na_j; d_states[n_nodes * STATE_Na_Bj + i] = Na_Bj + (fabs(dNa_Bj_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dNa_Bj_dt_linearized)) * dNa_Bj_dt / dNa_Bj_dt_linearized : dt * dNa_Bj_dt); const double dNa_Bsl_dt = -koff_na * Na_Bsl + kon_na * (Bmax_Nasl - Na_Bsl) * Na_sl; const double dNa_Bsl_dt_linearized = -koff_na - kon_na * Na_sl; d_states[n_nodes * STATE_Na_Bsl + i] = Na_Bsl + (fabs(dNa_Bsl_dt_linearized) > 1.0e-8 ? 
(-1.0 + exp(dt * dNa_Bsl_dt_linearized)) * dNa_Bsl_dt / dNa_Bsl_dt_linearized : dt * dNa_Bsl_dt); // Expressions for the Cytosolic Ca Buffers component const double dTn_CL_dt = -koff_tncl * Tn_CL + kon_tncl * (Bmax_TnClow - Tn_CL) * Ca_i; const double dTn_CL_dt_linearized = -koff_tncl - kon_tncl * Ca_i; d_states[n_nodes * STATE_Tn_CL + i] = (fabs(dTn_CL_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dTn_CL_dt_linearized)) * dTn_CL_dt / dTn_CL_dt_linearized : dt * dTn_CL_dt) + Tn_CL; const double dTn_CHc_dt = -koff_tnchca * Tn_CHc + kon_tnchca * (Bmax_TnChigh - Tn_CHc - Tn_CHm) * Ca_i; const double dTn_CHc_dt_linearized = -koff_tnchca - kon_tnchca * Ca_i; d_states[n_nodes * STATE_Tn_CHc + i] = (fabs(dTn_CHc_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dTn_CHc_dt_linearized)) * dTn_CHc_dt / dTn_CHc_dt_linearized : dt * dTn_CHc_dt) + Tn_CHc; const double dTn_CHm_dt = -koff_tnchmg * Tn_CHm + Mgi * kon_tnchmg * (Bmax_TnChigh - Tn_CHc - Tn_CHm); const double dTn_CHm_dt_linearized = -koff_tnchmg - Mgi * kon_tnchmg; d_states[n_nodes * STATE_Tn_CHm + i] = (fabs(dTn_CHm_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dTn_CHm_dt_linearized)) * dTn_CHm_dt / dTn_CHm_dt_linearized : dt * dTn_CHm_dt) + Tn_CHm; const double dCaM_dt = -koff_cam * CaM + kon_cam * (Bmax_CaM - CaM) * Ca_i; const double dCaM_dt_linearized = -koff_cam - kon_cam * Ca_i; d_states[n_nodes * STATE_CaM + i] = CaM + (fabs(dCaM_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dCaM_dt_linearized)) * dCaM_dt / dCaM_dt_linearized : dt * dCaM_dt); const double dMyo_c_dt = -koff_myoca * Myo_c + kon_myoca * (Bmax_myosin - Myo_c - Myo_m) * Ca_i; const double dMyo_c_dt_linearized = -koff_myoca - kon_myoca * Ca_i; d_states[n_nodes * STATE_Myo_c + i] = Myo_c + (fabs(dMyo_c_dt_linearized) > 1.0e-8 ? 
(-1.0 + exp(dt * dMyo_c_dt_linearized)) * dMyo_c_dt / dMyo_c_dt_linearized : dt * dMyo_c_dt); const double dMyo_m_dt = -koff_myomg * Myo_m + Mgi * kon_myomg * (Bmax_myosin - Myo_c - Myo_m); const double dMyo_m_dt_linearized = -koff_myomg - Mgi * kon_myomg; d_states[n_nodes * STATE_Myo_m + i] = Myo_m + (fabs(dMyo_m_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dMyo_m_dt_linearized)) * dMyo_m_dt / dMyo_m_dt_linearized : dt * dMyo_m_dt); const double dSRB_dt = -koff_sr * SRB + kon_sr * (Bmax_SR - SRB) * Ca_i; const double dSRB_dt_linearized = -koff_sr - kon_sr * Ca_i; d_states[n_nodes * STATE_SRB + i] = (fabs(dSRB_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dSRB_dt_linearized)) * dSRB_dt / dSRB_dt_linearized : dt * dSRB_dt) + SRB; const double J_CaB_cytosol = -koff_cam * CaM - koff_myoca * Myo_c - koff_myomg * Myo_m - koff_sr * SRB - koff_tnchca * Tn_CHc - koff_tnchmg * Tn_CHm - koff_tncl * Tn_CL + Mgi * kon_myomg * (Bmax_myosin - Myo_c - Myo_m) + Mgi * kon_tnchmg * (Bmax_TnChigh - Tn_CHc - Tn_CHm) + kon_cam * (Bmax_CaM - CaM) * Ca_i + kon_myoca * (Bmax_myosin - Myo_c - Myo_m) * Ca_i + kon_sr * (Bmax_SR - SRB) * Ca_i + kon_tnchca * (Bmax_TnChigh - Tn_CHc - Tn_CHm) * Ca_i + kon_tncl * (Bmax_TnClow - Tn_CL) * Ca_i; // Expressions for the Junctional and SL Ca Buffers component const double Bmax_SLlowsl = Bmax_SLlowsl0 * Vmyo / Vsl; const double Bmax_SLlowj = Bmax_SLlowj0 * Vmyo / Vjunc; const double Bmax_SLhighsl = Bmax_SLhighsl0 * Vmyo / Vsl; const double Bmax_SLhighj = Bmax_SLhighj0 * Vmyo / Vjunc; const double dSLL_j_dt = -koff_sll * SLL_j + kon_sll * (-SLL_j + Bmax_SLlowj) * Ca_j; const double dSLL_j_dt_linearized = -koff_sll - kon_sll * Ca_j; d_states[n_nodes * STATE_SLL_j + i] = (fabs(dSLL_j_dt_linearized) > 1.0e-8 ? 
(-1.0 + exp(dt * dSLL_j_dt_linearized)) * dSLL_j_dt / dSLL_j_dt_linearized : dt * dSLL_j_dt) + SLL_j; const double dSLL_sl_dt = -koff_sll * SLL_sl + kon_sll * (-SLL_sl + Bmax_SLlowsl) * Ca_sl; const double dSLL_sl_dt_linearized = -koff_sll - kon_sll * Ca_sl; d_states[n_nodes * STATE_SLL_sl + i] = (fabs(dSLL_sl_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dSLL_sl_dt_linearized)) * dSLL_sl_dt / dSLL_sl_dt_linearized : dt * dSLL_sl_dt) + SLL_sl; const double dSLH_j_dt = -koff_slh * SLH_j + kon_slh * (-SLH_j + Bmax_SLhighj) * Ca_j; const double dSLH_j_dt_linearized = -koff_slh - kon_slh * Ca_j; d_states[n_nodes * STATE_SLH_j + i] = (fabs(dSLH_j_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dSLH_j_dt_linearized)) * dSLH_j_dt / dSLH_j_dt_linearized : dt * dSLH_j_dt) + SLH_j; const double dSLH_sl_dt = -koff_slh * SLH_sl + kon_slh * (-SLH_sl + Bmax_SLhighsl) * Ca_sl; const double dSLH_sl_dt_linearized = -koff_slh - kon_slh * Ca_sl; d_states[n_nodes * STATE_SLH_sl + i] = (fabs(dSLH_sl_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dSLH_sl_dt_linearized)) * dSLH_sl_dt / dSLH_sl_dt_linearized : dt * dSLH_sl_dt) + SLH_sl; const double J_CaB_junction = -koff_slh * SLH_j - koff_sll * SLL_j + kon_slh * (-SLH_j + Bmax_SLhighj) * Ca_j + kon_sll * (-SLL_j + Bmax_SLlowj) * Ca_j; const double J_CaB_sl = -koff_slh * SLH_sl - koff_sll * SLL_sl + kon_slh * (-SLH_sl + Bmax_SLhighsl) * Ca_sl + kon_sll * (-SLL_sl + Bmax_SLlowsl) * Ca_sl; // Expressions for the SR Ca Concentrations component const double Bmax_Csqn = Bmax_Csqn0 * Vmyo / Vsr; const double dCsqn_b_dt = -koff_csqn * Csqn_b + kon_csqn * (-Csqn_b + Bmax_Csqn) * Ca_sr; const double dCsqn_b_dt_linearized = -koff_csqn - kon_csqn * Ca_sr; d_states[n_nodes * STATE_Csqn_b + i] = Csqn_b + (fabs(dCsqn_b_dt_linearized) > 1.0e-8 ? 
(-1.0 + exp(dt * dCsqn_b_dt_linearized)) * dCsqn_b_dt / dCsqn_b_dt_linearized : dt * dCsqn_b_dt); const double dCa_sr_dt = -J_SRCarel + koff_csqn * Csqn_b - kon_csqn * (-Csqn_b + Bmax_Csqn) * Ca_sr - J_SRleak * Vmyo / Vsr + J_serca; const double dJ_serca_dCa_sr = -Vmax_SRCaP * hillSRCaP * pow(Q10SRCaP, Qpow) * pow(Ca_sr / Kmr, hillSRCaP) / ((1. + pow(Ca_i / Kmf, hillSRCaP) + pow(Ca_sr / Kmr, hillSRCaP)) * Ca_sr) - Vmax_SRCaP * hillSRCaP * pow(Q10SRCaP, Qpow) * pow(Ca_sr / Kmr, hillSRCaP) * (pow(Ca_i / Kmf, hillSRCaP) - pow(Ca_sr / Kmr, hillSRCaP)) / (((1. + pow(Ca_i / Kmf, hillSRCaP) + pow(Ca_sr / Kmr, hillSRCaP)) * (1. + pow(Ca_i / Kmf, hillSRCaP) + pow(Ca_sr / Kmr, hillSRCaP))) * Ca_sr); const double dCa_sr_dt_linearized = -kon_csqn * (-Csqn_b + Bmax_Csqn) - ks * Ry_Ro - 5.348e-6 * Vmyo / Vsr + dJ_serca_dCa_sr; d_states[n_nodes * STATE_Ca_sr + i] = Ca_sr + (fabs(dCa_sr_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dCa_sr_dt_linearized)) * dCa_sr_dt / dCa_sr_dt_linearized : dt * dCa_sr_dt); // Expressions for the Na Concentrations component const double I_Na_tot_junc = 3. * I_nak_junc + 3. * I_ncx_junc + I_CaNa_junc + I_Na_junc + I_nabk_junc; const double I_Na_tot_sl = 3. * I_nak_sl + 3. * I_ncx_sl + I_CaNa_sl + I_Na_sl + I_nabk_sl; const double dNa_j_dt = -dNa_Bj_dt + J_na_juncsl * (-Na_j + Na_sl) / Vjunc - Cmem * I_Na_tot_junc / (Frdy * Vjunc); const double dI_ncx_junc_ds1_junc = Fjunc * IbarNCX * pow(Q10NCX, Qpow) * Ka_junc / ((1. + ksat * exp((-1. + nu) * FoRT * V_m)) * s3_junc); const double ds3_junc_dNa_j = 3. * Cao * (Na_j * Na_j) + 3. * KmCao * (Na_j * Na_j) + 3. * KmCai * (Nao * Nao * Nao) * (Na_j * Na_j) / (KmNai * KmNai * KmNai); const double dI_Na_junc_dena_junc = -Fjunc * GNa * (m * m * m) * h * j; const double dI_nabk_junc_dena_junc = -Fjunc * GNaB; const double ds1_junc_dNa_j = 3. * Cao * (Na_j * Na_j) * exp(nu * FoRT * V_m); const double dI_CaNa_junc_dibarna_j = 0.45 * Fjunc_CaL * pow(Q10CaL, Qpow) * (1. 
+ fcaCaj - f_Ca_Bj) * d * f; const double dI_ncx_junc_ds3_junc = -Fjunc * IbarNCX * pow(Q10NCX, Qpow) * (-s2_junc + s1_junc) * Ka_junc / ((1. + ksat * exp((-1. + nu) * FoRT * V_m)) * (s3_junc * s3_junc)); const double dena_junc_dNa_j = -1. / (FoRT * Na_j); const double dibarna_j_dNa_j = 0.75 * Frdy * GCaL * pNa * FoRT * V_m * exp(FoRT * V_m) / (-1. + exp(FoRT * V_m)); const double dI_nak_junc_dNa_j = 4. * Fjunc * IbarNaK * Ko * (((KmNaip) * (KmNaip)) * ((KmNaip) * (KmNaip))) * fnak / (((1. + (((KmNaip) * (KmNaip)) * ((KmNaip) * (KmNaip))) / (((Na_j) * (Na_j)) * ((Na_j) * (Na_j)))) * (1. + (((KmNaip) * (KmNaip)) * ((KmNaip) * (KmNaip))) / (((Na_j) * (Na_j)) * ((Na_j) * (Na_j))))) * (KmKo + Ko) * pow(Na_j, 5.)); const double dNa_j_dt_linearized = -J_na_juncsl / Vjunc - Cmem * (3. * dI_nak_junc_dNa_j + dI_CaNa_junc_dibarna_j * dibarna_j_dNa_j + dI_Na_junc_dena_junc * dena_junc_dNa_j + dI_nabk_junc_dena_junc * dena_junc_dNa_j + 3. * dI_ncx_junc_ds1_junc * ds1_junc_dNa_j + 3. * dI_ncx_junc_ds3_junc * ds3_junc_dNa_j) / (Frdy * Vjunc); d_states[n_nodes * STATE_Na_j + i] = Na_j + (fabs(dNa_j_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dNa_j_dt_linearized)) * dNa_j_dt / dNa_j_dt_linearized : dt * dNa_j_dt); const double dNa_sl_dt = -dNa_Bsl_dt + J_na_juncsl * (-Na_sl + Na_j) / Vsl + J_na_slmyo * (-Na_sl + Na_i) / Vsl - Cmem * I_Na_tot_sl / (Frdy * Vsl); const double dI_Na_sl_dena_sl = -GNa * (m * m * m) * Fsl * h * j; const double dI_CaNa_sl_dibarna_sl = 0.45 * pow(Q10CaL, Qpow) * (1. + fcaCaMSL - f_Ca_Bsl) * Fsl_CaL * d * f; const double dI_nabk_sl_dena_sl = -GNaB * Fsl; const double dI_ncx_sl_ds3_sl = -IbarNCX * pow(Q10NCX, Qpow) * (-s2_sl + s1_sl) * Fsl * Ka_sl / ((1. + ksat * exp((-1. + nu) * FoRT * V_m)) * (s3_sl * s3_sl)); const double ds1_sl_dNa_sl = 3. * Cao * (Na_sl * Na_sl) * exp(nu * FoRT * V_m); const double ds3_sl_dNa_sl = 3. * Cao * (Na_sl * Na_sl) + 3. * KmCao * (Na_sl * Na_sl) + 3. 
* KmCai * (Nao * Nao * Nao) * (Na_sl * Na_sl) / (KmNai * KmNai * KmNai); const double dibarna_sl_dNa_sl = 0.75 * Frdy * GCaL * pNa * FoRT * V_m * exp(FoRT * V_m) / (-1. + exp(FoRT * V_m)); const double dI_nak_sl_dNa_sl = 4. * IbarNaK * Ko * (((KmNaip) * (KmNaip)) * ((KmNaip) * (KmNaip))) * Fsl * fnak / (((1. + (((KmNaip) * (KmNaip)) * ((KmNaip) * (KmNaip))) / (((Na_sl) * (Na_sl)) * ((Na_sl) * (Na_sl)))) * (1. + (((KmNaip) * (KmNaip)) * ((KmNaip) * (KmNaip))) / (((Na_sl) * (Na_sl)) * ((Na_sl) * (Na_sl))))) * (KmKo + Ko) * pow(Na_sl, 5.)); const double dena_sl_dNa_sl = -1. / (FoRT * Na_sl); const double dI_ncx_sl_ds1_sl = IbarNCX * pow(Q10NCX, Qpow) * Fsl * Ka_sl / ((1. + ksat * exp((-1. + nu) * FoRT * V_m)) * s3_sl); const double dNa_sl_dt_linearized = -J_na_juncsl / Vsl - J_na_slmyo / Vsl - Cmem * (3. * dI_nak_sl_dNa_sl + dI_CaNa_sl_dibarna_sl * dibarna_sl_dNa_sl + dI_Na_sl_dena_sl * dena_sl_dNa_sl + dI_nabk_sl_dena_sl * dena_sl_dNa_sl + 3. * dI_ncx_sl_ds1_sl * ds1_sl_dNa_sl + 3. * dI_ncx_sl_ds3_sl * ds3_sl_dNa_sl) / (Frdy * Vsl); d_states[n_nodes * STATE_Na_sl + i] = Na_sl + (fabs(dNa_sl_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dNa_sl_dt_linearized)) * dNa_sl_dt / dNa_sl_dt_linearized : dt * dNa_sl_dt); const double dNa_i_dt = J_na_slmyo * (-Na_i + Na_sl) / Vmyo; const double dNa_i_dt_linearized = -J_na_slmyo / Vmyo; d_states[n_nodes * STATE_Na_i + i] = Na_i + (fabs(dNa_i_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dNa_i_dt_linearized)) * dNa_i_dt / dNa_i_dt_linearized : dt * dNa_i_dt); // Expressions for the K Concentration component const double I_K_tot = -2. * I_nak + I_CaK + I_K1 + I_kp + I_kr + I_ks + I_to; const double dK_i_dt = 0.; d_states[n_nodes * STATE_K_i + i] = dt * dK_i_dt + K_i; // Expressions for the Ca Concentrations component const double I_Ca_tot_junc = -2. * I_ncx_junc + I_Ca_junc + I_cabk_junc + I_pca_junc; const double I_Ca_tot_sl = -2. 
* I_ncx_sl + I_Ca_sl + I_cabk_sl + I_pca_sl; const double dCa_j_dt = -J_CaB_junction + J_ca_juncsl * (-Ca_j + Ca_sl) / Vjunc + J_SRCarel * Vsr / Vjunc + J_SRleak * Vmyo / Vjunc - Cmem * I_Ca_tot_junc / (2. * Frdy * Vjunc); const double dI_ncx_junc_ds2_junc = -Fjunc * IbarNCX * pow(Q10NCX, Qpow) * Ka_junc / ((1. + ksat * exp((-1. + nu) * FoRT * V_m)) * s3_junc); const double dKa_junc_dCa_j = 2. * (Kdact * Kdact) / (((1. + (Kdact * Kdact) / (Ca_j * Ca_j)) * (1. + (Kdact * Kdact) / (Ca_j * Ca_j))) * (Ca_j * Ca_j * Ca_j)); const double ds3_junc_dCa_j = (Nao * Nao * Nao) + (KmNao * KmNao * KmNao) * (1. + Ca_j / KmCai) + (KmNao * KmNao * KmNao) * Ca_j / KmCai; const double deca_junc_dCa_j = -1. / (2. * Ca_j * FoRT); const double dI_ncx_junc_dKa_junc = Fjunc * IbarNCX * pow(Q10NCX, Qpow) * (-s2_junc + s1_junc) / ((1. + ksat * exp((-1. + nu) * FoRT * V_m)) * s3_junc); const double dJ_CaB_junction_dCa_j = kon_slh * (-SLH_j + Bmax_SLhighj) + kon_sll * (-SLL_j + Bmax_SLlowj); const double dibarca_j_dCa_j = 1.364 * Frdy * GCaL * pCa * FoRT * V_m * exp(2. * FoRT * V_m) / (-1. + exp(2. * FoRT * V_m)); const double dI_Ca_junc_dibarca_j = 0.45 * Fjunc_CaL * pow(Q10CaL, Qpow) * (1. + fcaCaj - f_Ca_Bj) * d * f; const double ds2_junc_dCa_j = (Nao * Nao * Nao) * exp((-1. + nu) * FoRT * V_m); const double dI_cabk_junc_deca_junc = -Fjunc * GCaB; const double dI_pca_junc_dCa_j = 1.6 * Fjunc * IbarSLCaP * pow(Q10SLCaP, Qpow) * pow(Ca_j, 0.6) / (pow(KmPCa, 1.6) + pow(Ca_j, 1.6)) - 1.6 * Fjunc * IbarSLCaP * pow(Q10SLCaP, Qpow) * pow(Ca_j, 2.2) / ((pow(KmPCa, 1.6) + pow(Ca_j, 1.6)) * (pow(KmPCa, 1.6) + pow(Ca_j, 1.6))); const double dJ_SRCarel_dCa_j = -ks * Ry_Ro; const double dCa_j_dt_linearized = -dJ_CaB_junction_dCa_j - J_ca_juncsl / Vjunc - 5.348e-6 * Vmyo / Vjunc + Vsr * dJ_SRCarel_dCa_j / Vjunc - Cmem * (dI_Ca_junc_dibarca_j * dibarca_j_dCa_j + dI_cabk_junc_deca_junc * deca_junc_dCa_j - 2. * dI_ncx_junc_dKa_junc * dKa_junc_dCa_j - 2. * dI_ncx_junc_ds2_junc * ds2_junc_dCa_j - 2. 
* dI_ncx_junc_ds3_junc * ds3_junc_dCa_j + dI_pca_junc_dCa_j) / (2. * Frdy * Vjunc); d_states[n_nodes * STATE_Ca_j + i] = Ca_j + (fabs(dCa_j_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dCa_j_dt_linearized)) * dCa_j_dt / dCa_j_dt_linearized : dt * dCa_j_dt); const double dCa_sl_dt = -J_CaB_sl + J_ca_juncsl * (-Ca_sl + Ca_j) / Vsl + J_ca_slmyo * (-Ca_sl + Ca_i) / Vsl - Cmem * I_Ca_tot_sl / (2. * Frdy * Vsl); const double dI_Ca_sl_dibarca_sl = 0.45 * pow(Q10CaL, Qpow) * (1. + fcaCaMSL - f_Ca_Bsl) * Fsl_CaL * d * f; const double dI_ncx_sl_ds2_sl = -IbarNCX * pow(Q10NCX, Qpow) * Fsl * Ka_sl / ((1. + ksat * exp((-1. + nu) * FoRT * V_m)) * s3_sl); const double dKa_sl_dCa_sl = 2. * (Kdact * Kdact) / (((1. + (Kdact * Kdact) / (Ca_sl * Ca_sl)) * (1. + (Kdact * Kdact) / (Ca_sl * Ca_sl))) * (Ca_sl * Ca_sl * Ca_sl)); const double deca_sl_dCa_sl = -1. / (2. * Ca_sl * FoRT); const double dibarca_sl_dCa_sl = 1.364 * Frdy * GCaL * pCa * FoRT * V_m * exp(2. * FoRT * V_m) / (-1. + exp(2. * FoRT * V_m)); const double dI_cabk_sl_deca_sl = -GCaB * Fsl; const double ds3_sl_dCa_sl = (Nao * Nao * Nao) + (KmNao * KmNao * KmNao) * (1. + Ca_sl / KmCai) + (KmNao * KmNao * KmNao) * Ca_sl / KmCai; const double dI_ncx_sl_dKa_sl = IbarNCX * pow(Q10NCX, Qpow) * (-s2_sl + s1_sl) * Fsl / ((1. + ksat * exp((-1. + nu) * FoRT * V_m)) * s3_sl); const double ds2_sl_dCa_sl = (Nao * Nao * Nao) * exp((-1. + nu) * FoRT * V_m); const double dI_pca_sl_dCa_sl = 1.6 * IbarSLCaP * pow(Q10SLCaP, Qpow) * pow(Ca_sl, 0.6) * Fsl / (pow(KmPCa, 1.6) + pow(Ca_sl, 1.6)) - 1.6 * IbarSLCaP * pow(Q10SLCaP, Qpow) * pow(Ca_sl, 2.2) * Fsl / ((pow(KmPCa, 1.6) + pow(Ca_sl, 1.6)) * (pow(KmPCa, 1.6) + pow(Ca_sl, 1.6))); const double dJ_CaB_sl_dCa_sl = kon_slh * (-SLH_sl + Bmax_SLhighsl) + kon_sll * (-SLL_sl + Bmax_SLlowsl); const double dCa_sl_dt_linearized = -dJ_CaB_sl_dCa_sl - J_ca_juncsl / Vsl - J_ca_slmyo / Vsl - Cmem * (dI_Ca_sl_dibarca_sl * dibarca_sl_dCa_sl + dI_cabk_sl_deca_sl * deca_sl_dCa_sl - 2. 
* dI_ncx_sl_dKa_sl * dKa_sl_dCa_sl - 2. * dI_ncx_sl_ds2_sl * ds2_sl_dCa_sl - 2. * dI_ncx_sl_ds3_sl * ds3_sl_dCa_sl + dI_pca_sl_dCa_sl) / (2. * Frdy * Vsl); d_states[n_nodes * STATE_Ca_sl + i] = Ca_sl + (fabs(dCa_sl_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dCa_sl_dt_linearized)) * dCa_sl_dt / dCa_sl_dt_linearized : dt * dCa_sl_dt); const double dCa_i_dt = -J_CaB_cytosol + J_ca_slmyo * (-Ca_i + Ca_sl) / Vmyo - J_serca * Vsr / Vmyo; const double dJ_serca_dCa_i = Vmax_SRCaP * hillSRCaP * pow(Q10SRCaP, Qpow) * pow(Ca_i / Kmf, hillSRCaP) / ((1. + pow(Ca_i / Kmf, hillSRCaP) + pow(Ca_sr / Kmr, hillSRCaP)) * Ca_i) - Vmax_SRCaP * hillSRCaP * pow(Q10SRCaP, Qpow) * pow(Ca_i / Kmf, hillSRCaP) * (pow(Ca_i / Kmf, hillSRCaP) - pow(Ca_sr / Kmr, hillSRCaP)) / (((1. + pow(Ca_i / Kmf, hillSRCaP) + pow(Ca_sr / Kmr, hillSRCaP)) * (1. + pow(Ca_i / Kmf, hillSRCaP) + pow(Ca_sr / Kmr, hillSRCaP))) * Ca_i); const double dJ_CaB_cytosol_dCa_i = kon_cam * (Bmax_CaM - CaM) + kon_myoca * (Bmax_myosin - Myo_c - Myo_m) + kon_sr * (Bmax_SR - SRB) + kon_tnchca * (Bmax_TnChigh - Tn_CHc - Tn_CHm) + kon_tncl * (Bmax_TnClow - Tn_CL); const double dCa_i_dt_linearized = -dJ_CaB_cytosol_dCa_i - J_ca_slmyo / Vmyo - Vsr * dJ_serca_dCa_i / Vmyo; d_states[n_nodes * STATE_Ca_i + i] = Ca_i + (fabs(dCa_i_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dCa_i_dt_linearized)) * dCa_i_dt / dCa_i_dt_linearized : dt * dCa_i_dt); // Expressions for the Membrane potential component const double i_Stim = (t - stim_period * floor(t / stim_period) <= stim_duration + stim_start && t - stim_period * floor(t / stim_period) >= stim_start ? -stim_amplitude : 0.); const double I_Na_tot = I_Na_tot_junc + I_Na_tot_sl; const double I_Cl_tot = I_ClCa + I_Clbk; const double I_Ca_tot = I_Ca_tot_junc + I_Ca_tot_sl; const double I_tot = I_Ca_tot + I_Cl_tot + I_K_tot + I_Na_tot; const double dV_m_dt = -I_tot - i_Stim; const double ds2_sl_dV_m = (Nao * Nao * Nao) * (-1. + nu) * Ca_sl * FoRT * exp((-1. 
+ nu) * FoRT * V_m); const double dI_nak_junc_dfnak = Fjunc * IbarNaK * Ko / ((1. + (((KmNaip) * (KmNaip)) * ((KmNaip) * (KmNaip))) / (((Na_j) * (Na_j)) * ((Na_j) * (Na_j)))) * (KmKo + Ko)); const double daki_dV_m = -1.78913955652069e-7 * exp(0.2385 * V_m - 0.2385 * ek) / ((1. + 7.35454251046446e-7 * exp(0.2385 * V_m - 0.2385 * ek)) * (1. + 7.35454251046446e-7 * exp(0.2385 * V_m - 0.2385 * ek))); const double dkiss_dbki = -aki / ((aki + bki) * (aki + bki)); const double dkiss_daki = 1.0 / (aki + bki) - aki / ((aki + bki) * (aki + bki)); const double dbki_dV_m = (7.12227979727698e-18 * exp(0.06175 * V_m - 0.06175 * ek) + 0.0612539602025867 * exp(0.08032 * V_m - 0.08032 * ek)) / (1. + 0.0867722941576933 * exp(0.5143 * ek - 0.5143 * V_m)) + 0.0446269908853017 * (0.762624006506308 * exp(0.08032 * V_m - 0.08032 * ek) + 1.15340563518656e-16 * exp(0.06175 * V_m - 0.06175 * ek)) * exp(0.5143 * ek - 0.5143 * V_m) / ((1. + 0.0867722941576933 * exp(0.5143 * ek - 0.5143 * V_m)) * (1. + 0.0867722941576933 * exp(0.5143 * ek - 0.5143 * V_m))); const double dI_K1_dV_m = 0.430331482911935 * GK1 * sqrt(Ko) * kiss + 0.430331482911935 * GK1 * sqrt(Ko) * (-ek + V_m) * (daki_dV_m * dkiss_daki + dbki_dV_m * dkiss_dbki); const double dI_kp_junc_dkp_kp = Fjunc * GKp * (-ek + V_m); const double dibarca_sl_dV_m = 4. * Frdy * GCaL * pCa * (-0.341 * Cao + 0.341 * Ca_sl * exp(2. * FoRT * V_m)) * FoRT / (-1. + exp(2. * FoRT * V_m)) - 8. * Frdy * GCaL * pCa * (FoRT * FoRT) * (-0.341 * Cao + 0.341 * Ca_sl * exp(2. * FoRT * V_m)) * V_m * exp(2. * FoRT * V_m) / ((-1. + exp(2. * FoRT * V_m)) * (-1. + exp(2. * FoRT * V_m))) + 2.728 * Frdy * GCaL * pCa * (FoRT * FoRT) * Ca_sl * V_m * exp(2. * FoRT * V_m) / (-1. + exp(2. * FoRT * V_m)); const double ds2_junc_dV_m = (Nao * Nao * Nao) * (-1. + nu) * Ca_j * FoRT * exp((-1. + nu) * FoRT * V_m); const double dibarna_j_dV_m = Frdy * GCaL * pNa * (-0.75 * Nao + 0.75 * Na_j * exp(FoRT * V_m)) * FoRT / (-1. 
+ exp(FoRT * V_m)) - Frdy * GCaL * pNa * (FoRT * FoRT) * (-0.75 * Nao + 0.75 * Na_j * exp(FoRT * V_m)) * V_m * exp(FoRT * V_m) / ((-1. + exp(FoRT * V_m)) * (-1. + exp(FoRT * V_m))) + 0.75 * Frdy * GCaL * pNa * (FoRT * FoRT) * Na_j * V_m * exp(FoRT * V_m) / (-1. + exp(FoRT * V_m)); const double dibarna_sl_dV_m = Frdy * GCaL * pNa * (-0.75 * Nao + 0.75 * Na_sl * exp(FoRT * V_m)) * FoRT / (-1. + exp(FoRT * V_m)) - Frdy * GCaL * pNa * (FoRT * FoRT) * (-0.75 * Nao + 0.75 * Na_sl * exp(FoRT * V_m)) * V_m * exp(FoRT * V_m) / ((-1. + exp(FoRT * V_m)) * (-1. + exp(FoRT * V_m))) + 0.75 * Frdy * GCaL * pNa * (FoRT * FoRT) * Na_sl * V_m * exp(FoRT * V_m) / (-1. + exp(FoRT * V_m)); const double dI_kr_drkr = (-ek + V_m) * gkr * x_kr; const double dI_tos_dV_m = GtoSlow * x_to_s * y_to_s; const double dI_ks_sl_dV_m = (x_ks * x_ks) * Fsl * gks_sl; const double dI_ClCa_junc_dV_m = Fjunc * GClCa / (1. + KdClCa / Ca_j); const double drkr_dV_m = -exp(37. / 12. + V_m / 24.) / (24. * ((1. + exp(37. / 12. + V_m / 24.)) * (1. + exp(37. / 12. + V_m / 24.)))); const double dI_kr_dV_m = gkr * rkr * x_kr + (-ek + V_m) * drkr_dV_m * gkr * x_kr; const double dfnak_dV_m = (0.01245 * FoRT * exp(-0.1 * FoRT * V_m) + 0.0365 * FoRT * exp(-FoRT * V_m) * sigma) / ((1. + 0.1245 * exp(-0.1 * FoRT * V_m) + 0.0365 * exp(-FoRT * V_m) * sigma) * (1. + 0.1245 * exp(-0.1 * FoRT * V_m) + 0.0365 * exp(-FoRT * V_m) * sigma)); const double ds1_junc_dV_m = Cao * nu * (Na_j * Na_j * Na_j) * FoRT * exp(nu * FoRT * V_m); const double dI_tof_dV_m = GtoFast * x_to_f * y_to_f; const double dI_ks_junc_dV_m = Fjunc * (x_ks * x_ks) * gks_junc; const double ds1_sl_dV_m = Cao * nu * (Na_sl * Na_sl * Na_sl) * FoRT * exp(nu * FoRT * V_m); const double dI_ncx_sl_dV_m = IbarNCX * pow(Q10NCX, Qpow) * (-ds2_sl_dV_m + ds1_sl_dV_m) * Fsl * Ka_sl / ((1. + ksat * exp((-1. + nu) * FoRT * V_m)) * s3_sl) - IbarNCX * ksat * pow(Q10NCX, Qpow) * (-1. + nu) * (-s2_sl + s1_sl) * FoRT * Fsl * Ka_sl * exp((-1. + nu) * FoRT * V_m) / (((1. 
+ ksat * exp((-1. + nu) * FoRT * V_m)) * (1. + ksat * exp((-1. + nu) * FoRT * V_m))) * s3_sl); const double dI_CaK_dibark = 0.45 * pow(Q10CaL, Qpow) * (Fjunc_CaL * (1. + fcaCaj - f_Ca_Bj) + (1. + fcaCaMSL - f_Ca_Bsl) * Fsl_CaL) * d * f; const double dI_K1_dkiss = 0.430331482911935 * GK1 * sqrt(Ko) * (-ek + V_m); const double dI_Na_junc_dV_m = Fjunc * GNa * (m * m * m) * h * j; const double dI_kp_sl_dkp_kp = GKp * (-ek + V_m) * Fsl; const double dI_Na_sl_dV_m = GNa * (m * m * m) * Fsl * h * j; const double dkp_kp_dV_m = 298.741733340907 * exp(-0.167224080267559 * V_m) / ((1. + 1786.47556537862 * exp(-0.167224080267559 * V_m)) * (1. + 1786.47556537862 * exp(-0.167224080267559 * V_m))); const double dI_ncx_junc_dV_m = Fjunc * IbarNCX * pow(Q10NCX, Qpow) * (-ds2_junc_dV_m + ds1_junc_dV_m) * Ka_junc / ((1. + ksat * exp((-1. + nu) * FoRT * V_m)) * s3_junc) - Fjunc * IbarNCX * ksat * pow(Q10NCX, Qpow) * (-1. + nu) * (-s2_junc + s1_junc) * FoRT * Ka_junc * exp((-1. + nu) * FoRT * V_m) / (((1. + ksat * exp((-1. + nu) * FoRT * V_m)) * (1. + ksat * exp((-1. + nu) * FoRT * V_m))) * s3_junc); const double dI_ClCa_sl_dV_m = GClCa * Fsl / (1. + KdClCa / Ca_sl); const double dI_kp_junc_dV_m = Fjunc * GKp * kp_kp + Fjunc * GKp * (-ek + V_m) * dkp_kp_dV_m; const double dibarca_j_dV_m = 4. * Frdy * GCaL * pCa * (-0.341 * Cao + 0.341 * Ca_j * exp(2. * FoRT * V_m)) * FoRT / (-1. + exp(2. * FoRT * V_m)) - 8. * Frdy * GCaL * pCa * (FoRT * FoRT) * (-0.341 * Cao + 0.341 * Ca_j * exp(2. * FoRT * V_m)) * V_m * exp(2. * FoRT * V_m) / ((-1. + exp(2. * FoRT * V_m)) * (-1. + exp(2. * FoRT * V_m))) + 2.728 * Frdy * GCaL * pCa * (FoRT * FoRT) * Ca_j * V_m * exp(2. * FoRT * V_m) / (-1. + exp(2. * FoRT * V_m)); const double dibark_dV_m = Frdy * GCaL * pK * (-0.75 * Ko + 0.75 * K_i * exp(FoRT * V_m)) * FoRT / (-1. + exp(FoRT * V_m)) - Frdy * GCaL * pK * (FoRT * FoRT) * (-0.75 * Ko + 0.75 * K_i * exp(FoRT * V_m)) * V_m * exp(FoRT * V_m) / ((-1. + exp(FoRT * V_m)) * (-1. 
+ exp(FoRT * V_m))) + 0.75 * Frdy * GCaL * pK * (FoRT * FoRT) * K_i * V_m * exp(FoRT * V_m) / (-1. + exp(FoRT * V_m)); const double dI_kp_sl_dV_m = GKp * Fsl * kp_kp + GKp * (-ek + V_m) * Fsl * dkp_kp_dV_m; const double dI_nak_sl_dfnak = IbarNaK * Ko * Fsl / ((1. + (((KmNaip) * (KmNaip)) * ((KmNaip) * (KmNaip))) / (((Na_sl) * (Na_sl)) * ((Na_sl) * (Na_sl)))) * (KmKo + Ko)); const double dV_m_dt_linearized = -GClB - dI_ClCa_junc_dV_m - dI_ClCa_sl_dV_m - dI_K1_dV_m - dI_Na_junc_dV_m - dI_Na_sl_dV_m - dI_kp_junc_dV_m - dI_kp_sl_dV_m - dI_kr_dV_m - dI_ks_junc_dV_m - dI_ks_sl_dV_m - dI_ncx_junc_dV_m - dI_ncx_sl_dV_m - dI_tof_dV_m - dI_tos_dV_m - Fjunc * GCaB - Fjunc * GNaB - GCaB * Fsl - GNaB * Fsl - (daki_dV_m * dkiss_daki + dbki_dV_m * dkiss_dbki) * dI_K1_dkiss - dI_CaK_dibark * dibark_dV_m - dI_CaNa_junc_dibarna_j * dibarna_j_dV_m - dI_CaNa_sl_dibarna_sl * dibarna_sl_dV_m - dI_Ca_junc_dibarca_j * dibarca_j_dV_m - dI_Ca_sl_dibarca_sl * dibarca_sl_dV_m - dI_kp_junc_dkp_kp * dkp_kp_dV_m - dI_kp_sl_dkp_kp * dkp_kp_dV_m - dI_kr_drkr * drkr_dV_m - dI_nak_junc_dfnak * dfnak_dV_m - dI_nak_sl_dfnak * dfnak_dV_m - dI_ncx_junc_ds1_junc * ds1_junc_dV_m - dI_ncx_junc_ds2_junc * ds2_junc_dV_m - dI_ncx_sl_ds1_sl * ds1_sl_dV_m - dI_ncx_sl_ds2_sl * ds2_sl_dV_m; d_states[n_nodes * STATE_V_m + i] = (fabs(dV_m_dt_linearized) > 1.0e-8 ? (-1.0 + exp(dt * dV_m_dt_linearized)) * dV_m_dt / dV_m_dt_linearized : dt * dV_m_dt) + V_m; } }
ThreadedFriends.h
/****************************************************************/ /* Parallel Combinatorial BLAS Library (for Graph Computations) */ /* version 1.6 -------------------------------------------------*/ /* date: 6/15/2017 ---------------------------------------------*/ /* authors: Ariful Azad, Aydin Buluc --------------------------*/ /****************************************************************/ /* Copyright (c) 2010-2017, The Regents of the University of California Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #ifndef _THREADED_FRIENDS_H_ #define _THREADED_FRIENDS_H_ #include <iostream> #include "SpMat.h" // Best to include the base class first #include "SpHelper.h" #include "StackEntry.h" #include "Isect.h" #include "Deleter.h" #include "SpImpl.h" #include "SpParHelper.h" #include "Compare.h" #include "CombBLAS.h" #include "PreAllocatedSPA.h" namespace combblas { template <class IU, class NU> class SpTuples; template <class IU, class NU> class SpDCCols; template <class IU, class NU> class Dcsc; // multithreaded HeapSpGEMM template <typename SR, typename NTO, typename IT, typename NT1, typename NT2> SpTuples<IT, NTO> * LocalSpGEMM (const SpDCCols<IT, NT1> & A, const SpDCCols<IT, NT2> & B, bool clearA, bool clearB) { IT mdim = A.getnrow(); IT ndim = B.getncol(); IT nnzA = A.getnnz(); if(A.isZero() || B.isZero()) { return new SpTuples<IT, NTO>(0, mdim, ndim); } Dcsc<IT,NT1>* Adcsc = A.GetDCSC(); Dcsc<IT,NT2>* Bdcsc = B.GetDCSC(); IT nA = A.getncol(); IT cnzmax = Adcsc->nz + Bdcsc->nz; // estimate on the size of resulting matrix C float cf = static_cast<float>(nA+1) / static_cast<float>(Adcsc->nzc); IT csize = static_cast<IT>(ceil(cf)); // chunk size IT * aux; Adcsc->ConstructAux(nA, aux); int numThreads = 1; // default case #ifdef THREADED #pragma omp parallel { numThreads = omp_get_num_threads(); } #endif IT* colnnzC = estimateNNZ(A, B); IT* colptrC = prefixsum<IT>(colnnzC, Bdcsc->nzc, numThreads); delete [] colnnzC; IT nnzc = colptrC[Bdcsc->nzc]; std::tuple<IT,IT,NTO> * tuplesC = static_cast<std::tuple<IT,IT,NTO> *> (::operator new (sizeof(std::tuple<IT,IT,NTO>[nnzc]))); // thread private space for heap and colinds std::vector<std::vector< std::pair<IT,IT>>> colindsVec(numThreads); std::vector<std::vector<HeapEntry<IT,NT1>>> globalheapVec(numThreads); for(int i=0; i<numThreads; i++) //inital allocation per thread, may be an overestimate, but does not require more memoty than inputs { colindsVec[i].resize(nnzA/numThreads); globalheapVec[i].resize(nnzA/numThreads); } 
#pragma omp parallel for for(int i=0; i < Bdcsc->nzc; ++i) { IT nnzcolB = Bdcsc->cp[i+1] - Bdcsc->cp[i]; //nnz in the current column of B int myThread = omp_get_thread_num(); if(colindsVec[myThread].size() < nnzcolB) //resize thread private vectors if needed { colindsVec[myThread].resize(nnzcolB); globalheapVec[myThread].resize(nnzcolB); } // colinds.first vector keeps indices to A.cp, i.e. it dereferences "colnums" vector (above), // colinds.second vector keeps the end indices (i.e. it gives the index to the last valid element of A.cpnack) Adcsc->FillColInds(Bdcsc->ir + Bdcsc->cp[i], nnzcolB, colindsVec[myThread], aux, csize); std::pair<IT,IT> * colinds = colindsVec[myThread].data(); HeapEntry<IT,NT1> * wset = globalheapVec[myThread].data(); IT hsize = 0; for(IT j = 0; (unsigned)j < nnzcolB; ++j) // create the initial heap { if(colinds[j].first != colinds[j].second) // current != end { wset[hsize++] = HeapEntry< IT,NT1 > (Adcsc->ir[colinds[j].first], j, Adcsc->numx[colinds[j].first]); } } std:make_heap(wset, wset+hsize); IT curptr = colptrC[i]; while(hsize > 0) { std::pop_heap(wset, wset + hsize); // result is stored in wset[hsize-1] IT locb = wset[hsize-1].runr; // relative location of the nonzero in B's current column NTO mrhs = SR::multiply(wset[hsize-1].num, Bdcsc->numx[Bdcsc->cp[i]+locb]); if (!SR::returnedSAID()) { if( (curptr > colptrC[i]) && std::get<0>(tuplesC[curptr-1]) == wset[hsize-1].key) { std::get<2>(tuplesC[curptr-1]) = SR::add(std::get<2>(tuplesC[curptr-1]), mrhs); } else { tuplesC[curptr++]= std::make_tuple(wset[hsize-1].key, Bdcsc->jc[i], mrhs) ; } } if( (++(colinds[locb].first)) != colinds[locb].second) // current != end { // runr stays the same ! 
wset[hsize-1].key = Adcsc->ir[colinds[locb].first]; wset[hsize-1].num = Adcsc->numx[colinds[locb].first]; std::push_heap(wset, wset+hsize); } else { --hsize; } } } if(clearA) delete const_cast<SpDCCols<IT, NT1> *>(&A); if(clearB) delete const_cast<SpDCCols<IT, NT2> *>(&B); delete [] colptrC; delete [] aux; SpTuples<IT, NTO>* spTuplesC = new SpTuples<IT, NTO> (nnzc, mdim, ndim, tuplesC, true); return spTuplesC; } } #endif
cg.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 3.0 structured OpenMP C versions - CG This benchmark is an OpenMP C version of the NPB CG code. The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: M. Yarrow C. Kuszmaul OpenMP C version: S. Satoh 3.0 structure translation: F. Conti --------------------------------------------------------------------*/ /* c--------------------------------------------------------------------- c Note: please observe that in the routine conj_grad three c implementations of the sparse matrix-vector multiply have c been supplied. The default matrix-vector multiply is not c loop unrolled. The alternate implementations are unrolled c to a depth of 2 and unrolled to a depth of 8. Please c experiment with these to find the fastest for your particular c architecture. If reporting timing results, any of these three may c be used without penalty. 
c--------------------------------------------------------------------- */ #include "../common/npb-C.h" #include "npbparams.h" #define NZ NA*(NONZER+1)*(NONZER+1)+NA*(NONZER+2) /* global variables */ /* common /partit_size/ */ #include <omp.h> static int naa; static int nzz; static int firstrow; static int lastrow; static int firstcol; static int lastcol; /* common /main_int_mem/ */ /* colidx[1:NZ] */ static int colidx[2198001]; /* rowstr[1:NA+1] */ static int rowstr[14002]; /* iv[1:2*NA+1] */ static int iv[28002]; /* arow[1:NZ] */ static int arow[2198001]; /* acol[1:NZ] */ static int acol[2198001]; /* common /main_flt_mem/ */ /* v[1:NA+1] */ static double v[14002]; /* aelt[1:NZ] */ static double aelt[2198001]; /* a[1:NZ] */ static double a[2198001]; /* x[1:NA+2] */ static double x[14003]; /* z[1:NA+2] */ static double z[14003]; /* p[1:NA+2] */ static double p[14003]; /* q[1:NA+2] */ static double q[14003]; /* r[1:NA+2] */ static double r[14003]; //static double w[NA+2+1]; /* w[1:NA+2] */ /* common /urando/ */ static double amult; static double tran; /* function declarations */ static //double w[], void conj_grad(int colidx[],int rowstr[],double x[],double z[],double a[],double p[],double q[],double r[],double *rnorm); static void makea(int n,int nz,double a[],int colidx[],int rowstr[],int nonzer,int firstrow,int lastrow,int firstcol,int lastcol,double rcond,int arow[],int acol[],double aelt[],double v[],int iv[],double shift); static void sparse(double a[],int colidx[],int rowstr[],int n,int arow[],int acol[],double aelt[],int firstrow,int lastrow,double x[],boolean mark[],int nzloc[],int nnza); static void sprnvc(int n,int nz,double v[],int iv[],int nzloc[],int mark[]); static int icnvrt(double x,int ipwr2); static void vecset(int n,double v[],int iv[],int *nzv,int i,double val); /*-------------------------------------------------------------------- program cg --------------------------------------------------------------------*/ int main(int argc,char **argv) { 
int i; int j; int k; int it; int nthreads = 1; double zeta; double rnorm; double norm_temp11; double norm_temp12; double t; double mflops; char class; boolean verified; double zeta_verify_value; double epsilon; firstrow = 1; lastrow = 14000; firstcol = 1; lastcol = 14000; if (14000 == 1400 && 11 == 7 && 15 == 15 && 20.0 == 10.0) { class = 'S'; zeta_verify_value = 8.5971775078648; } else if (14000 == 7000 && 11 == 8 && 15 == 15 && 20.0 == 12.0) { class = 'W'; zeta_verify_value = 10.362595087124; } else if (14000 == 14000 && 11 == 11 && 15 == 15 && 20.0 == 20.0) { class = 'A'; zeta_verify_value = 17.130235054029; } else if (14000 == 75000 && 11 == 13 && 15 == 75 && 20.0 == 60.0) { class = 'B'; zeta_verify_value = 22.712745482631; } else if (14000 == 150000 && 11 == 15 && 15 == 75 && 20.0 == 110.0) { class = 'C'; zeta_verify_value = 28.973605592845; } else { class = 'U'; } printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version - CG Benchmark\n"); printf(" Size: %10d\n",14000); printf(" Iterations: %5d\n",15); naa = 14000; nzz = 14000 * (11 + 1) * (11 + 1) + 14000 * (11 + 2); /*-------------------------------------------------------------------- c Initialize random number generator c-------------------------------------------------------------------*/ tran = 314159265.0; amult = 1220703125.0; zeta = randlc(&tran,amult); /*-------------------------------------------------------------------- c c-------------------------------------------------------------------*/ makea(naa,nzz,a,colidx,rowstr,11,firstrow,lastrow,firstcol,lastcol,1.0e-1,arow,acol,aelt,v,iv,20.0); /*--------------------------------------------------------------------- c Note: as a result of the above call to makea: c values of j used in indexing rowstr go from 1 --> lastrow-firstrow+1 c values of colidx which are col indexes go from firstcol --> lastcol c So: c Shift the col index vals from actual (firstcol --> lastcol ) c to local, i.e., (1 --> lastcol-firstcol+1) 
c---------------------------------------------------------------------*/ { for (j = 1; j <= lastrow - firstrow + 1; j += 1) { #pragma omp parallel for private (k) for (k = rowstr[j]; k <= rowstr[j + 1] - 1; k += 1) { colidx[k] = colidx[k] - firstcol + 1; } } /*-------------------------------------------------------------------- c set starting vector to (1, 1, .... 1) c-------------------------------------------------------------------*/ #pragma omp parallel for private (i) for (i = 1; i <= 14001; i += 1) { x[i] = 1.0; } #pragma omp parallel for private (j) for (j = 1; j <= lastcol - firstcol + 1; j += 1) { q[j] = 0.0; z[j] = 0.0; r[j] = 0.0; p[j] = 0.0; } // end omp parallel } zeta = 0.0; /*------------------------------------------------------------------- c----> c Do one iteration untimed to init all code and data page tables c----> (then reinit, start timing, to niter its) c-------------------------------------------------------------------*/ for (it = 1; it <= 1; it += 1) { /*-------------------------------------------------------------------- c The call to the conjugate gradient routine: c-------------------------------------------------------------------*/ /* w,*/ conj_grad(colidx,rowstr,x,z,a,p,q,r,&rnorm); /*-------------------------------------------------------------------- c zeta = shift + 1/(x.z) c So, first: (x.z) c Also, find norm of z c So, first: (z.z) c-------------------------------------------------------------------*/ norm_temp11 = 0.0; norm_temp12 = 0.0; #pragma omp parallel for private (j) reduction (+:norm_temp11,norm_temp12) for (j = 1; j <= lastcol - firstcol + 1; j += 1) { norm_temp11 = norm_temp11 + x[j] * z[j]; norm_temp12 = norm_temp12 + z[j] * z[j]; } norm_temp12 = 1.0 / sqrt(norm_temp12); /*-------------------------------------------------------------------- c Normalize z to obtain x c-------------------------------------------------------------------*/ #pragma omp parallel for private (j) firstprivate (norm_temp12) for (j = 1; j <= 
lastcol - firstcol + 1; j += 1) { x[j] = norm_temp12 * z[j]; } /* end of do one iteration untimed */ } /*-------------------------------------------------------------------- c set starting vector to (1, 1, .... 1) c-------------------------------------------------------------------*/ #pragma omp parallel for private (i) for (i = 1; i <= 14001; i += 1) { x[i] = 1.0; } zeta = 0.0; timer_clear(1); timer_start(1); /*-------------------------------------------------------------------- c----> c Main Iteration for inverse power method c----> c-------------------------------------------------------------------*/ for (it = 1; it <= 15; it += 1) { /*-------------------------------------------------------------------- c The call to the conjugate gradient routine: c-------------------------------------------------------------------*/ /*, w*/ conj_grad(colidx,rowstr,x,z,a,p,q,r,&rnorm); /*-------------------------------------------------------------------- c zeta = shift + 1/(x.z) c So, first: (x.z) c Also, find norm of z c So, first: (z.z) c-------------------------------------------------------------------*/ norm_temp11 = 0.0; norm_temp12 = 0.0; #pragma omp parallel for private (j) reduction (+:norm_temp11,norm_temp12) for (j = 1; j <= lastcol - firstcol + 1; j += 1) { norm_temp11 = norm_temp11 + x[j] * z[j]; norm_temp12 = norm_temp12 + z[j] * z[j]; } norm_temp12 = 1.0 / sqrt(norm_temp12); zeta = 20.0 + 1.0 / norm_temp11; if (it == 1) { printf(" iteration ||r|| zeta\n"); } printf(" %5d %20.14e%20.13e\n",it,rnorm,zeta); /*-------------------------------------------------------------------- c Normalize z to obtain x c-------------------------------------------------------------------*/ #pragma omp parallel for private (j) firstprivate (norm_temp12) for (j = 1; j <= lastcol - firstcol + 1; j += 1) { x[j] = norm_temp12 * z[j]; } /* end of main iter inv pow meth */ } { //#if defined(_OPENMP) // nthreads = omp_get_num_threads(); //#endif /* _OPENMP */ /* end parallel */ } 
timer_stop(1); /*-------------------------------------------------------------------- c End of timed section c-------------------------------------------------------------------*/ t = timer_read(1); printf(" Benchmark completed\n"); epsilon = 1.0e-10; //epsilon = 1.0e-2; if (class != 'U') { if (fabs(zeta - zeta_verify_value) <= epsilon) { verified = 1; printf(" VERIFICATION SUCCESSFUL\n"); printf(" Zeta is %20.12e\n",zeta); printf(" Error is %20.12e\n",zeta - zeta_verify_value); } else { verified = 0; printf(" VERIFICATION FAILED\n"); printf(" Zeta %20.12e\n",zeta); printf(" The correct zeta is %20.12e\n",zeta_verify_value); } } else { verified = 0; printf(" Problem size unknown\n"); printf(" NO VERIFICATION PERFORMED\n"); } if (t != 0.0) { mflops = 2.0 * 15 * 14000 * (3.0 + (11 * (11 + 1)) + 25.0 * (5.0 + (11 * (11 + 1))) + 3.0) / t / 1000000.0; } else { mflops = 0.0; } c_print_results("CG",class,14000,0,0,15,nthreads,t,mflops," floating point",verified,"3.0 structured","14 Jan 2020","(none)","(none)","-lm","(none)","(none)","(none)","randdp"); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void conj_grad( /* colidx[1:nzz] */ int colidx[], /* rowstr[1:naa+1] */ int rowstr[], /* x[*] */ double x[], /* z[*] */ double z[], /* a[1:nzz] */ double a[], /* p[*] */ double p[], /* q[*] */ double q[], /* r[*] */ double r[], //double w[], /* w[*] */ double *rnorm) /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*--------------------------------------------------------------------- c Floaging point arrays here are named as in NPB1 spec discussion of c CG algorithm c---------------------------------------------------------------------*/ { static int callcount = 0; double d; double sum; double rho; double rho0; double alpha; double beta; int i; int j; int k; int cgit; int cgitmax = 
25; rho = 0.0; /*-------------------------------------------------------------------- c Initialize the CG algorithm: c-------------------------------------------------------------------*/ { #pragma omp parallel for private (j) firstprivate (naa) for (j = 1; j <= naa + 1; j += 1) { q[j] = 0.0; z[j] = 0.0; r[j] = x[j]; p[j] = r[j]; //w[j] = 0.0; } /*-------------------------------------------------------------------- c rho = r.r c Now, obtain the norm of r: First, sum squares of r elements locally... c-------------------------------------------------------------------*/ #pragma omp parallel for private (j) reduction (+:rho) for (j = 1; j <= lastcol - firstcol + 1; j += 1) { rho = rho + r[j] * r[j]; } /* end omp parallel */ } /*-------------------------------------------------------------------- c----> c The conj grad iteration loop c----> c-------------------------------------------------------------------*/ for (cgit = 1; cgit <= cgitmax; cgit += 1) { rho0 = rho; d = 0.0; rho = 0.0; { /*-------------------------------------------------------------------- c q = A.p c The partition submatrix-vector multiply: use workspace w c--------------------------------------------------------------------- C C NOTE: this version of the multiply is actually (slightly: maybe %5) C faster on the sp2 on 16 nodes than is the unrolled-by-2 version C below. On the Cray t3d, the reverse is true, i.e., the C unrolled-by-two version is some 10% faster. C The unrolled-by-8 version below is significantly faster C on the Cray t3d - overall speed of code is 1.5 times faster. 
*/ /* rolled version */ #pragma omp parallel for private (sum,j,k) for (j = 1; j <= lastrow - firstrow + 1; j += 1) { sum = 0.0; #pragma omp parallel for private (k) reduction (+:sum) for (k = rowstr[j]; k <= rowstr[j + 1] - 1; k += 1) { sum = sum + a[k] * p[colidx[k]]; } //w[j] = sum; q[j] = sum; } /* unrolled-by-two version for (j = 1; j <= lastrow-firstrow+1; j++) { int iresidue; double sum1, sum2; i = rowstr[j]; iresidue = (rowstr[j+1]-i) % 2; sum1 = 0.0; sum2 = 0.0; if (iresidue == 1) sum1 = sum1 + a[i]*p[colidx[i]]; for (k = i+iresidue; k <= rowstr[j+1]-2; k += 2) { sum1 = sum1 + a[k] * p[colidx[k]]; sum2 = sum2 + a[k+1] * p[colidx[k+1]]; } w[j] = sum1 + sum2; } */ /* unrolled-by-8 version for (j = 1; j <= lastrow-firstrow+1; j++) { int iresidue; i = rowstr[j]; iresidue = (rowstr[j+1]-i) % 8; sum = 0.0; for (k = i; k <= i+iresidue-1; k++) { sum = sum + a[k] * p[colidx[k]]; } for (k = i+iresidue; k <= rowstr[j+1]-8; k += 8) { sum = sum + a[k ] * p[colidx[k ]] + a[k+1] * p[colidx[k+1]] + a[k+2] * p[colidx[k+2]] + a[k+3] * p[colidx[k+3]] + a[k+4] * p[colidx[k+4]] + a[k+5] * p[colidx[k+5]] + a[k+6] * p[colidx[k+6]] + a[k+7] * p[colidx[k+7]]; } w[j] = sum; } */ /* for (j = 1; j <= lastcol-firstcol+1; j++) { q[j] = w[j]; } */ /*-------------------------------------------------------------------- c Clear w for reuse... 
c-------------------------------------------------------------------*/ /* for (j = 1; j <= lastcol-firstcol+1; j++) { w[j] = 0.0; } */ /*-------------------------------------------------------------------- c Obtain p.q c-------------------------------------------------------------------*/ #pragma omp parallel for private (j) reduction (+:d) for (j = 1; j <= lastcol - firstcol + 1; j += 1) { d = d + p[j] * q[j]; } /*-------------------------------------------------------------------- c Obtain alpha = rho / (p.q) c-------------------------------------------------------------------*/ alpha = rho0 / d; /*-------------------------------------------------------------------- c Save a temporary of rho c-------------------------------------------------------------------*/ /* rho0 = rho;*/ /*--------------------------------------------------------------------- c Obtain z = z + alpha*p c and r = r - alpha*q c---------------------------------------------------------------------*/ #pragma omp parallel for private (j) reduction (+:rho) firstprivate (alpha) for (j = 1; j <= lastcol - firstcol + 1; j += 1) { z[j] = z[j] + alpha * p[j]; r[j] = r[j] - alpha * q[j]; // } /*--------------------------------------------------------------------- c rho = r.r c Now, obtain the norm of r: First, sum squares of r elements locally... 
c---------------------------------------------------------------------*/ /* for (j = 1; j <= lastcol-firstcol+1; j++) {*/ rho = rho + r[j] * r[j]; } /*-------------------------------------------------------------------- c Obtain beta: c-------------------------------------------------------------------*/ beta = rho / rho0; /*-------------------------------------------------------------------- c p = r + beta*p c-------------------------------------------------------------------*/ #pragma omp parallel for private (j) firstprivate (beta) for (j = 1; j <= lastcol - firstcol + 1; j += 1) { p[j] = r[j] + beta * p[j]; } callcount++; /* end omp parallel */ } /* end of do cgit=1,cgitmax */ } /*--------------------------------------------------------------------- c Compute residual norm explicitly: ||r|| = ||x - A.z|| c First, form A.z c The partition submatrix-vector multiply c---------------------------------------------------------------------*/ sum = 0.0; { #pragma omp parallel for private (d,j,k) firstprivate (firstrow,lastrow) for (j = 1; j <= lastrow - firstrow + 1; j += 1) { d = 0.0; #pragma omp parallel for private (k) reduction (+:d) for (k = rowstr[j]; k <= rowstr[j + 1] - 1; k += 1) { d = d + a[k] * z[colidx[k]]; } r[j] = d; } /*-------------------------------------------------------------------- c At this point, r contains A.z c-------------------------------------------------------------------*/ #pragma omp parallel for private (d,j) reduction (+:sum) firstprivate (firstcol,lastcol) for (j = 1; j <= lastcol - firstcol + 1; j += 1) { d = x[j] - r[j]; sum = sum + d * d; } //end omp parallel } *rnorm = sqrt(sum); } /*--------------------------------------------------------------------- c generate the test problem for benchmark 6 c makea generates a sparse matrix with a c prescribed sparsity distribution c c parameter type usage c c input c c n i number of cols/rows of matrix c nz i nonzeros as declared array size c rcond r*8 condition number c shift r*8 main 
diagonal shift
c
c output
c
c a r*8 array for nonzeros
c colidx i col indices
c rowstr i row pointers
c
c workspace
c
c iv, arow, acol i
c v, aelt r*8
c---------------------------------------------------------------------*/
static void makea(int n,int nz,
/* a[1:nz] */
double a[],
/* colidx[1:nz] */
int colidx[],
/* rowstr[1:n+1] */
int rowstr[],int nonzer,int firstrow,int lastrow,int firstcol,int lastcol,double rcond,
/* arow[1:nz] */
int arow[],
/* acol[1:nz] */
int acol[],
/* aelt[1:nz] */
double aelt[],
/* v[1:n+1] */
double v[],
/* iv[1:2*n+1] */
int iv[],double shift)
{
int i;
int nnza;      /* running count of generated (row,col,val) triples */
int iouter;
int ivelt;
int ivelt1;
int irow;
int nzv;
/*--------------------------------------------------------------------
c nonzer is approximately (int(sqrt(nnza /n)));
c-------------------------------------------------------------------*/
double size;
double ratio;
double scale;
int jcol;
/* size decays geometrically from 1.0 to rcond over the n outer passes,
   so eigenvalues of the outer-product sum span that range. */
size = 1.0;
ratio = pow(rcond,1.0 / ((double )n));
nnza = 0;
/*---------------------------------------------------------------------
c Initialize colidx(n+1 .. 2n) to zero.
c Used by sprnvc to mark nonzero positions
c---------------------------------------------------------------------*/
#pragma omp parallel for private (i)
for (i = 1; i <= n; i += 1) {
colidx[n + i] = 0;
}
/* Each outer pass generates a sparse random vector and accumulates the
   scaled outer product v.v' as (arow,acol,aelt) triples. */
for (iouter = 1; iouter <= n; iouter += 1) {
nzv = nonzer;
sprnvc(n,nzv,v,iv,&colidx[0],&colidx[n]);
/* Force element iouter to 0.5 so the diagonal is always populated. */
vecset(n,v,iv,&nzv,iouter,0.5);
for (ivelt = 1; ivelt <= nzv; ivelt += 1) {
jcol = iv[ivelt];
if (jcol >= firstcol && jcol <= lastcol) {
scale = size * v[ivelt];
for (ivelt1 = 1; ivelt1 <= nzv; ivelt1 += 1) {
irow = iv[ivelt1];
if (irow >= firstrow && irow <= lastrow) {
nnza = nnza + 1;
if (nnza > nz) {
printf("Space for matrix elements exceeded in makea\n");
printf("nnza, nzmax = %d, %d\n",nnza,nz);
printf("iouter = %d\n",iouter);
exit(1);
}
acol[nnza] = jcol;
arow[nnza] = irow;
aelt[nnza] = v[ivelt1] * scale;
}
}
}
}
size = size * ratio;
}
/*---------------------------------------------------------------------
c ... add the identity * rcond to the generated matrix to bound
c the smallest eigenvalue from below by rcond
c---------------------------------------------------------------------*/
for (i = firstrow; i <= lastrow; i += 1) {
if (i >= firstcol && i <= lastcol) {
iouter = n + i;
nnza = nnza + 1;
if (nnza > nz) {
printf("Space for matrix elements exceeded in makea\n");
printf("nnza, nzmax = %d, %d\n",nnza,nz);
printf("iouter = %d\n",iouter);
exit(1);
}
acol[nnza] = i;
arow[nnza] = i;
aelt[nnza] = rcond - shift;
}
}
/*---------------------------------------------------------------------
c ... make the sparse matrix from list of elements with duplicates
c (v and iv are used as workspace)
c---------------------------------------------------------------------*/
sparse(a,colidx,rowstr,n,arow,acol,aelt,firstrow,lastrow,v,&iv[0],&iv[n],nnza);
}

/*---------------------------------------------------
c generate a sparse matrix (CSR: a/colidx/rowstr) from a list of
c [col, row, element] triples, summing duplicates
c---------------------------------------------------*/
static void sparse(
/* a[1:*] */
double a[],
/* colidx[1:*] */
int colidx[],
/* rowstr[1:*] */
int rowstr[],int n,
/* arow[1:*] */
int arow[],
/* acol[1:*] */
int acol[],
/* aelt[1:*] */
double aelt[],int firstrow,int lastrow,
/* x[1:n] */
double x[],
/* mark[1:n] */
boolean mark[],
/* nzloc[1:n] */
int nzloc[],int nnza)
/*---------------------------------------------------------------------
c rows range from firstrow to lastrow
c the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
c---------------------------------------------------------------------*/
{
int nrows;
int i;
int j;
int jajp1;
int nza;
int k;
int nzrow;
double xi;
/*--------------------------------------------------------------------
c how many rows of result
c-------------------------------------------------------------------*/
nrows = lastrow - firstrow + 1;
/*--------------------------------------------------------------------
c ...count the number of triples in each row
c-------------------------------------------------------------------*/ #pragma omp parallel for private (j) for (j = 1; j <= n; j += 1) { rowstr[j] = 0; mark[j] = 0; } rowstr[n + 1] = 0; for (nza = 1; nza <= nnza; nza += 1) { j = arow[nza] - firstrow + 1 + 1; rowstr[j] = rowstr[j] + 1; } rowstr[1] = 1; for (j = 2; j <= nrows + 1; j += 1) { rowstr[j] = rowstr[j] + rowstr[j - 1]; } /*--------------------------------------------------------------------- c ... rowstr(j) now is the location of the first nonzero c of row j of a c---------------------------------------------------------------------*/ /*--------------------------------------------------------------------- c ... preload data pages c---------------------------------------------------------------------*/ for (j = 0; j <= nrows - 1; j += 1) { #pragma omp parallel for private (k) for (k = rowstr[j]; k <= rowstr[j + 1] - 1; k += 1) { a[k] = 0.0; } } /*-------------------------------------------------------------------- c ... do a bucket sort of the triples on the row index c-------------------------------------------------------------------*/ for (nza = 1; nza <= nnza; nza += 1) { j = arow[nza] - firstrow + 1; k = rowstr[j]; a[k] = aelt[nza]; colidx[k] = acol[nza]; rowstr[j] = rowstr[j] + 1; } /*-------------------------------------------------------------------- c ... rowstr(j) now points to the first element of row j+1 c-------------------------------------------------------------------*/ for (j = nrows; j >= 1; j += -1) { rowstr[j + 1] = rowstr[j]; } rowstr[1] = 1; /*-------------------------------------------------------------------- c ... 
generate the actual output rows by adding elements c-------------------------------------------------------------------*/ nza = 0; #pragma omp parallel for private (i) firstprivate (n) for (i = 1; i <= n; i += 1) { x[i] = 0.0; mark[i] = 0; } jajp1 = rowstr[1]; for (j = 1; j <= nrows; j += 1) { nzrow = 0; /*-------------------------------------------------------------------- c ...loop over the jth row of a c-------------------------------------------------------------------*/ for (k = jajp1; k <= rowstr[j + 1] - 1; k += 1) { i = colidx[k]; x[i] = x[i] + a[k]; if (mark[i] == 0 && x[i] != 0.0) { mark[i] = 1; nzrow = nzrow + 1; nzloc[nzrow] = i; } } /*-------------------------------------------------------------------- c ... extract the nonzeros of this row c-------------------------------------------------------------------*/ for (k = 1; k <= nzrow; k += 1) { i = nzloc[k]; mark[i] = 0; xi = x[i]; x[i] = 0.0; if (xi != 0.0) { nza = nza + 1; a[nza] = xi; colidx[nza] = i; } } jajp1 = rowstr[j + 1]; rowstr[j + 1] = nza + rowstr[1]; } } /*--------------------------------------------------------------------- c generate a sparse n-vector (v, iv) c having nzv nonzeros c c mark(i) is set to 1 if position i is nonzero. c mark is all zero on entry and is reset to all zero before exit c this corrects a performance bug found by John G. 
Lewis, caused by
c reinitialization of mark on every one of the n calls to sprnvc
---------------------------------------------------------------------*/
static void sprnvc(int n,int nz,
/* v[1:*] */
double v[],
/* iv[1:*] */
int iv[],
/* nzloc[1:n] */
int nzloc[],
/* mark[1:n] */
int mark[])
{
int nn1;
int nzrow;
int nzv;
int ii;
int i;
double vecelt;
double vecloc;
nzv = 0;
nzrow = 0;
nn1 = 1;
do {
nn1 = 2 * nn1;
} while (nn1 < n);
/*--------------------------------------------------------------------
c nn1 is the smallest power of two not less than n
c-------------------------------------------------------------------*/
while (nzv < nz) {
vecelt = randlc(&tran,amult);
/*--------------------------------------------------------------------
c generate an integer between 1 and n in a portable manner
c-------------------------------------------------------------------*/
vecloc = randlc(&tran,amult);
i = icnvrt(vecloc,nn1) + 1;
if (i > n)
continue;
/*--------------------------------------------------------------------
c was this integer generated already?
c-------------------------------------------------------------------*/
if (mark[i] == 0) {
mark[i] = 1;
nzrow = nzrow + 1;
nzloc[nzrow] = i;
nzv = nzv + 1;
v[nzv] = vecelt;
iv[nzv] = i;
}
}
/* Reset only the positions actually marked, restoring mark to all zero. */
for (ii = 1; ii <= nzrow; ii += 1) {
i = nzloc[ii];
mark[i] = 0;
}
}

/*---------------------------------------------------------------------
* scale a double precision number x in (0,1) by a power of 2 and chop it
*---------------------------------------------------------------------*/
static int icnvrt(double x,int ipwr2)
{
return (int )(ipwr2 * x);
}

/*--------------------------------------------------------------------
c set ith element of sparse vector (v, iv) with
c nzv nonzeros to val; appends a new entry if position i is absent
c-------------------------------------------------------------------*/
static void vecset(int n,
/* v[1:*] */
double v[],
/* iv[1:*] */
int iv[],int *nzv,int i,double val)
{
int k;
boolean set;
set = 0;
/* FIX: removed the "#pragma omp parallel for" that was here.  The loop
   runs over at most *nzv (~NONZER) entries, and the unsynchronized write
   to the shared flag `set` was a data race under OpenMP. */
for (k = 1; k <= *nzv; k += 1) {
if (iv[k] == i) {
v[k] = val;
set = 1;
}
}
if (set == 0) {
*nzv = *nzv + 1;
v[ *nzv] = val;
iv[ *nzv] = i;
}
}
convolution_pack8to1_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Naive fp16 convolution from 8-element-packed input channels to unpacked
// (pack1) output channels, with fp16 arithmetic for the per-lane products
// and fp32 accumulation of the horizontal sums.
// Assumes top_blob is pre-sized to the output shape and weight_data_fp16 is
// laid out as [outch][channels*maxk*8] — TODO confirm against the caller.
static void convolution_pack8to1_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets: element offset (in packed units) of each kernel tap
    // relative to the window origin, accounting for dilation and row gap
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const float* bias_data_ptr = bias_data; // null when the layer has no bias

    // num_output: one thread per output channel
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                float sum = 0.f;

                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }

                const __fp16* kptr = weight_data_fp16.channel(p);

                // channels: accumulate over all packed input channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    // 8 fp16 lanes per packed element, hence the *8 strides
                    const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w * 8;

                    for (int k = 0; k < maxk; k++)
                    {
                        float16x8_t _val = vld1q_f16(sptr + space_ofs[k] * 8);
                        float16x8_t _w = vld1q_f16(kptr);
                        float16x8_t _s8 = vmulq_f16(_val, _w);
                        // Horizontal sum of the 8 fp16 products: fold high
                        // half onto low half in fp16, then widen the 4
                        // partial sums to fp32 and reduce.
                        float16x4_t _s4 = vadd_f16(vget_low_f16(_s8), vget_high_f16(_s8));
                        sum += vaddvq_f32(vcvt_f32_f16(_s4)); // dot

                        kptr += 8;
                    }
                }

                sum = activation_ss(sum, activation_type, activation_params);

                outptr[j] = sum;
            }

            outptr += outw;
        }
    }
}