source
stringlengths
3
92
c
stringlengths
26
2.25M
test.c
#include <stdio.h> #include <omp.h> #pragma omp requires unified_shared_memory #include "../utilities/check.h" #include "../utilities/utilities.h" #define TRIALS (1) #define N (992) #define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;}) #define ZERO(X) ZERO_ARRAY(N, X) int main(void) { check_offloading(); double A[N], B[N], C[N], D[N], E[N]; int fail = 0; INIT(); // ************************** // Series 1: no dist_schedule // ************************** // // Test: #iterations == #teams // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(512) #pragma omp distribute simd for (int i = 0 ; i < 512 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 512 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations > #teams // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute simd for (int i = 0 ; i < 500 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 500 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations < #teams // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute simd for (int i = 0 ; i < 123 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 123 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // **************************** // Series 2: with dist_schedule // **************************** // // Test: #iterations == #teams, dist_schedule(1) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(512) 
#pragma omp distribute simd dist_schedule(static,1) for (int i = 0 ; i < 512 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 512 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations == #teams, dist_schedule(#iterations) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(512) #pragma omp distribute simd dist_schedule(static,512) for (int i = 0 ; i < 512 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 512 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations == #teams, dist_schedule(#iterations/10), variable chunk size // ZERO(A); int ten = 10; int chunkSize = 512/ten; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(512) #pragma omp distribute simd dist_schedule(static,chunkSize) for (int i = 0 ; i < 512 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 512 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations > #teams, dist_schedule(1) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute simd dist_schedule(static,1) for (int i = 0 ; i < 500 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 500 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations > #teams, dist_schedule(#iterations) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp 
distribute simd dist_schedule(static,500) for (int i = 0 ; i < 500 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 500 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations > #teams, dist_schedule(#iterations/10), variable chunk size // ZERO(A); ten = 10; chunkSize = 500/ten; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute simd dist_schedule(static,chunkSize) for (int i = 0 ; i < 500 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 500 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations < #teams, dist_schedule(1) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute simd dist_schedule(static,1) for (int i = 0 ; i < 123 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 123 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations < #teams, dist_schedule(#iterations) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute simd dist_schedule(static,123) for (int i = 0 ; i < 123 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 123 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations < #teams, dist_schedule(#iterations) // ZERO(A); ten = 10; chunkSize = 123/ten; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma 
omp distribute simd dist_schedule(static,chunkSize) for (int i = 0 ; i < 123 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 123 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // **************************** // Series 3: with ds attributes // **************************** // // Test: private // ZERO(A); ZERO(B); double p = 2.0, q = 4.0; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) { #pragma omp distribute simd private(p,q) for(int i = 0 ; i < N ; i++) { p = 2; q = 3; A[i] += p; B[i] += q; } } } for(int i = 0 ; i < N ; i++) { if (A[i] != TRIALS*2) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) TRIALS*2, A[i]); fail = 1; } if (B[i] != TRIALS*3) { printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) TRIALS*3, B[i]); fail = 1; } } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: firstprivate // ZERO(A); ZERO(B); p = 2.0, q = 4.0; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target // implicit firstprivate for p and q, their initial values being 2 and 4 for each target invocation #pragma omp teams num_teams(64) { #pragma omp distribute simd firstprivate(p,q) for(int i = 0 ; i < 128 ; i++) { // 2 iterations for each team p += 3.0; // p and q are firstprivate to the team, and as such incremented twice (2 iterations per team) q += 7.0; A[i] += p; B[i] += q; } } } for(int i = 0 ; i < 128 ; i++) { if (i % 2 == 0) { if (A[i] != (2.0+3.0)*TRIALS) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]); fail = 1; } if (B[i] != (4.0+7.0)*TRIALS) { printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0)*TRIALS, B[i]); fail = 1; } } else { if (A[i] != (2.0+3.0*2)*TRIALS) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0*2)*TRIALS, A[i]); fail = 1; } if (B[i] != (4.0+7.0*2)*TRIALS) { printf("Error at B[%d], 
h = %lf, d = %lf\n", i, (double) (4.0+7.0*2)*TRIALS, B[i]); fail = 1; } } } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: lastprivate // int lastpriv = -1; #pragma omp target map(tofrom:lastpriv) #pragma omp teams num_teams(10) #pragma omp distribute simd lastprivate(lastpriv) for(int i = 0 ; i < omp_get_num_teams() ; i++) lastpriv = omp_get_team_num(); if(lastpriv != 9) { printf("lastpriv value is %d and should have been %d\n", lastpriv, 9); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // ************************** // Series 4: collapse // ************************** // // Test: 2 loops // double * S = (double *) malloc(N*N*sizeof(double)); double * T = (double *) malloc(N*N*sizeof(double)); double * U = (double *) malloc(N*N*sizeof(double)); for (int i = 0 ; i < N ; i++) for (int j = 0 ; j < N ; j++) { S[i*N+j] = 0.0; T[i*N+j] = 1.0; U[i*N+j] = 2.0; } for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target map(tofrom:S[:N*N]), map(to:T[:N*N],U[:N*N]) #pragma omp teams num_teams(512) #pragma omp distribute simd collapse(2) for (int i = 0 ; i < N ; i++) for (int j = 0 ; j < N ; j++) S[i*N+j] += T[i*N+j] + U[i*N+j]; // += 3 at each t } for (int i = 0 ; i < N ; i++) for (int j = 0 ; j < N ; j++) if (S[i*N+j] != TRIALS*3.0) { printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, S[i*N+j]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: 3 loops // int M = N/8; double * V = (double *) malloc(M*M*M*sizeof(double)); double * Z = (double *) malloc(M*M*M*sizeof(double)); for (int i = 0 ; i < M ; i++) for (int j = 0 ; j < M ; j++) for (int k = 0 ; k < M ; k++) { V[i*M*M+j*M+k] = 2.0; Z[i*M*M+j*M+k] = 3.0; } for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target map(tofrom:V[:M*M*M]), map(to:Z[:M*M*M]) #pragma omp teams num_teams(512) #pragma omp distribute simd collapse(3) for (int i = 0 ; i < M ; i++) for (int j = 0 ; j < M ; j++) for (int k = 0 ; k < M ; k++) V[i*M*M+j*M+k] 
+= Z[i*M*M+j*M+k]; // += 3 at each t } for (int i = 0 ; i < M ; i++) for (int j = 0 ; j < M ; j++) for (int k = 0 ; k < M ; k++) if (V[i*M*M+j*M+k] != 2.0+TRIALS*3.0) { printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, V[i*M*M+j*M+k]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); return 0; }
HGEMM_v1.h
#include <iostream> #include <math.h> #include <float.h> #include <assert.h> #include <string.h> #include <stdio.h> #include <stdint.h> #include <cholUtils.h> #ifndef HALIDE_ATTRIBUTE_ALIGN #ifdef _MSC_VER #define HALIDE_ATTRIBUTE_ALIGN(x) __declspec(align(x)) #else #define HALIDE_ATTRIBUTE_ALIGN(x) __attribute__((aligned(x))) #endif #endif #ifndef BUFFER_T_DEFINED #define BUFFER_T_DEFINED #include <stdbool.h> #include <stdint.h> typedef struct buffer_t { uint64_t dev; uint8_t* host; int32_t extent[4]; int32_t stride[4]; int32_t min[4]; int32_t elem_size; HALIDE_ATTRIBUTE_ALIGN(1) bool host_dirty; HALIDE_ATTRIBUTE_ALIGN(1) bool dev_dirty; HALIDE_ATTRIBUTE_ALIGN(1) uint8_t _padding[10 - sizeof(void *)]; } buffer_t; #endif #define __user_context_ NULL #define HSS struct halide_filter_metadata_t; extern "C" { void *sympiler_malloc(void *ctx, size_t s){return(malloc(s));} void sympiler_free(void *ctx, void *ptr){free(ptr);}; } #ifdef _WIN32 float roundf(float); double round(double); #else inline float asinh_f32(float x) {return asinhf(x);} inline float acosh_f32(float x) {return acoshf(x);} inline float atanh_f32(float x) {return atanhf(x);} inline double asinh_f64(double x) {return asinh(x);} inline double acosh_f64(double x) {return acosh(x);} inline double atanh_f64(double x) {return atanh(x);} #endif inline float sqrt_f32(float x) {return sqrtf(x);} inline float sin_f32(float x) {return sinf(x);} inline float asin_f32(float x) {return asinf(x);} inline float cos_f32(float x) {return cosf(x);} inline float acos_f32(float x) {return acosf(x);} inline float tan_f32(float x) {return tanf(x);} inline float atan_f32(float x) {return atanf(x);} inline float sinh_f32(float x) {return sinhf(x);} inline float cosh_f32(float x) {return coshf(x);} inline float tanh_f32(float x) {return tanhf(x);} inline float hypot_f32(float x, float y) {return hypotf(x, y);} inline float exp_f32(float x) {return expf(x);} inline float log_f32(float x) {return logf(x);} inline float 
pow_f32(float x, float y) {return powf(x, y);} inline float floor_f32(float x) {return floorf(x);} inline float ceil_f32(float x) {return ceilf(x);} inline float round_f32(float x) {return roundf(x);} inline double sqrt_f64(double x) {return sqrt(x);} inline double sin_f64(double x) {return sin(x);} inline double asin_f64(double x) {return asin(x);} inline double cos_f64(double x) {return cos(x);} inline double acos_f64(double x) {return acos(x);} inline double tan_f64(double x) {return tan(x);} inline double atan_f64(double x) {return atan(x);} inline double sinh_f64(double x) {return sinh(x);} inline double cosh_f64(double x) {return cosh(x);} inline double tanh_f64(double x) {return tanh(x);} inline double hypot_f64(double x, double y) {return hypot(x, y);} inline double exp_f64(double x) {return exp(x);} inline double log_f64(double x) {return log(x);} inline double pow_f64(double x, double y) {return pow(x, y);} inline double floor_f64(double x) {return floor(x);} inline double ceil_f64(double x) {return ceil(x);} inline double round_f64(double x) {return round(x);} inline float nan_f32() {return NAN;} inline float neg_inf_f32() {return -INFINITY;} inline float inf_f32() {return INFINITY;} inline bool is_nan_f32(float x) {return x != x;} inline bool is_nan_f64(double x) {return x != x;} inline float float_from_bits(uint32_t bits) { union { uint32_t as_uint; float as_float; } u; u.as_uint = bits; return u.as_float; } inline int64_t make_int64(int32_t hi, int32_t lo) { return (((int64_t)hi) << 32) | (uint32_t)lo; } inline double make_float64(int32_t i0, int32_t i1) { union { int32_t as_int32[2]; double as_double; } u; u.as_int32[0] = i0; u.as_int32[1] = i1; return u.as_double; } template<typename A, typename B> A reinterpret(B b) {A a; memcpy(&a, &b, sizeof(a)); return a;} double one [2]={1.0,0.}, zero [2]={0.,0.}; int sw = false, lb1 = 0, ub1 = 0; double *cur; int info=0; #ifdef __cplusplus extern "C" { #endif int32_t HGEMM(double *D, double *B, double *VT, 
uint64_t *Dptr, uint64_t *Bptr, int32_t *VTptr, int32_t *lchildren, int32_t *rchildren, int32_t *levelset, int32_t *idx, double *mrhs, double *apres, int32_t nrhs, int32_t *Ddim, int32_t *wptr, int32_t *uptr, double *wskel, int32_t *wskeloffset, double *uskel, int32_t *uskeloffset, int32_t *lm, int32_t *slen, int32_t *wpart, int32_t *clevelset) { #pragma omp parallel for for (int i = 0; i < 512; i++) { cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, Ddim[i],nrhs,Ddim[i], float_from_bits(1065353216 /* 1 */), &D[Dptr[i]], Ddim[i], &mrhs[wptr[i]], Ddim[i], float_from_bits(0 /* 0 */), &apres[uptr[i]], Ddim[i]); } // for i for (int i = 0; i < 1; i++) { int i=0; int32_t _0 = i + 1; #pragma omp parallel for for (int k = clevelset[i]; k < clevelset[_0]; k++) { int32_t _1 = k + 1; for (int j = wpart[k]; j < wpart[_1]; j++) { int32_t _2 = (int32_t)(4294967295); bool _3 = lchildren[idx[j]] == _2; if (_3) { cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[idx[j]],nrhs,Ddim[lm[idx[j]]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &mrhs[wptr[lm[idx[j]]]], Ddim[lm[idx[j]]], float_from_bits(0 /* 0 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); } // if _3 else { cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[idx[j]],nrhs,slen[lchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &wskel[wskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]], float_from_bits(0 /* 0 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); int32_t _4 = slen[idx[j]] * slen[lchildren[idx[j]]]; int32_t _5 = _4 + VTptr[idx[j]]; cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[idx[j]],nrhs,slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &VT[_5], slen[idx[j]], &wskel[wskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); } // if _3 else } // for j } // for k // for i for (int i = 1; i < 3; i++) { int32_t _0 = i 
+ 1; #pragma omp parallel for for (int k = clevelset[i]; k < clevelset[_0]; k++) { int32_t _1 = k + 1; for (int j = wpart[k]; j < wpart[_1]; j++) { // int32_t _2 = (int32_t)(4294967295); // bool _3 = lchildren[idx[j]] == _2; // if (_3) // { // cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, // slen[idx[j]],nrhs,Ddim[lm[idx[j]]], // float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], // slen[idx[j]], &mrhs[wptr[lm[idx[j]]]], Ddim[lm[idx[j]]], float_from_bits(0 /* 0 */), // &wskel[wskeloffset[idx[j]]], slen[idx[j]]); // } // if _3 // else { cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[idx[j]],nrhs,slen[lchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &wskel[wskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]], float_from_bits(0 /* 0 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); int32_t _4 = slen[idx[j]] * slen[lchildren[idx[j]]]; int32_t _5 = _4 + VTptr[idx[j]]; cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[idx[j]],nrhs,slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &VT[_5], slen[idx[j]], &wskel[wskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); } // if _3 else } // for j } // for k } // for i mkl_set_num_threads(12); for (int i = 3; i < 5; i++) { int32_t _0 = i + 1; //#pragma omp parallel for for (int k = clevelset[i]; k < clevelset[_0]; k++) { int32_t _1 = k + 1; for (int j = wpart[k]; j < wpart[_1]; j++) { // int32_t _2 = (int32_t)(4294967295); // bool _3 = lchildren[idx[j]] == _2; // if (_3) // { // cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, // slen[idx[j]],nrhs,Ddim[lm[idx[j]]], // float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], // slen[idx[j]], &mrhs[wptr[lm[idx[j]]]], Ddim[lm[idx[j]]], float_from_bits(0 /* 0 */), // &wskel[wskeloffset[idx[j]]], slen[idx[j]]); // } // if _3 // else { cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, 
slen[idx[j]],nrhs,slen[lchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &wskel[wskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]], float_from_bits(0 /* 0 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); int32_t _4 = slen[idx[j]] * slen[lchildren[idx[j]]]; int32_t _5 = _4 + VTptr[idx[j]]; cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[idx[j]],nrhs,slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &VT[_5], slen[idx[j]], &wskel[wskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */), &wskel[wskeloffset[idx[j]]], slen[idx[j]]); } // if _3 else } // for j } // for k } // for i //mkl_set_num_threads(1); uint32_t _6 = (uint32_t)(1); uint32_t _7 = (uint32_t)(1023); #pragma omp parallel for for (int i = _6; i < _7; i++) { uint32_t _8 = (uint32_t)(1); int32_t _9 = i - _8; int32_t _10 = i + _8; int32_t _11 = i & 1; uint32_t _12 = (uint32_t)(0); bool _13 = _11 == _12; int32_t _14 = (int32_t)(_13 ? 
_9 : _10); cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, slen[i],nrhs,slen[_14], _8, &B[Bptr[_9]], slen[i], &wskel[wskeloffset[_14]], slen[_14], _12, &uskel[uskeloffset[i]], slen[i]); } // for i int32_t _15 = 3 - 1; int32_t _16 = 5 - 1; mkl_set_num_threads(12); for (int i = _16; i > _15; i--) { int32_t _17 = i + 1; //#pragma omp parallel for for (int k = clevelset[i]; k < clevelset[_17]; k++) { int32_t _18 = wpart[k] - 1; int32_t _19 = k + 1; int32_t _20 = wpart[_19] - 1; for (int j = _20; j > _18; j--) { // int32_t _21 = (int32_t)(4294967295); // bool _22 = lchildren[idx[j]] == _21; // if (_22) // { // cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, // Ddim[lm[idx[j]]],nrhs,slen[idx[j]], // float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], // slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), // &apres[uptr[lm[idx[j]]]], Ddim[lm[idx[j]]]); // } // if _22 // else { cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, slen[lchildren[idx[j]]],nrhs,slen[idx[j]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &uskel[uskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]]); int32_t _23 = slen[idx[j]] * slen[lchildren[idx[j]]]; int32_t _24 = _23 + VTptr[idx[j]]; cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, slen[rchildren[idx[j]]],nrhs,slen[idx[j]], float_from_bits(1065353216 /* 1 */), &VT[_24], slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &uskel[uskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]]); } // if _22 else } // for j } // for k } // for i _15 = 1 - 1; _16 = 3 - 1; mkl_set_dynamic(true); for (int i = _16; i > _15; i--) { int32_t _17 = i + 1; #pragma omp parallel for for (int k = clevelset[i]; k < clevelset[_17]; k++) { int32_t _18 = wpart[k] - 1; int32_t _19 = k + 1; int32_t _20 = wpart[_19] - 1; for (int j = _20; j > _18; j--) { // 
int32_t _21 = (int32_t)(4294967295); // bool _22 = lchildren[idx[j]] == _21; // if (_22) // { // cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, // Ddim[lm[idx[j]]],nrhs,slen[idx[j]], // float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], // slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), // &apres[uptr[lm[idx[j]]]], Ddim[lm[idx[j]]]); // } // if _22 // else { cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, slen[lchildren[idx[j]]],nrhs,slen[idx[j]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &uskel[uskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]]); int32_t _23 = slen[idx[j]] * slen[lchildren[idx[j]]]; int32_t _24 = _23 + VTptr[idx[j]]; cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, slen[rchildren[idx[j]]],nrhs,slen[idx[j]], float_from_bits(1065353216 /* 1 */), &VT[_24], slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &uskel[uskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]]); } // if _22 else } // for j } // for k } // for i _15 = 0 - 1; _16 = 1 - 1; for (int i = _16; i > _15; i--) { int32_t _17 = i + 1; #pragma omp parallel for for (int k = clevelset[i]; k < clevelset[_17]; k++) { int32_t _18 = wpart[k] - 1; int32_t _19 = k + 1; int32_t _20 = wpart[_19] - 1; for (int j = _20; j > _18; j--) { int32_t _21 = (int32_t)(4294967295); bool _22 = lchildren[idx[j]] == _21; if (_22) { cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, Ddim[lm[idx[j]]],nrhs,slen[idx[j]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &apres[uptr[lm[idx[j]]]], Ddim[lm[idx[j]]]); } // if _22 else { cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, slen[lchildren[idx[j]]],nrhs,slen[idx[j]], float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]], 
slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &uskel[uskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]]); int32_t _23 = slen[idx[j]] * slen[lchildren[idx[j]]]; int32_t _24 = _23 + VTptr[idx[j]]; cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, slen[rchildren[idx[j]]],nrhs,slen[idx[j]], float_from_bits(1065353216 /* 1 */), &VT[_24], slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */), &uskel[uskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]]); } // if _22 else } // for j } // for k } // for i return 0; } #ifdef __cplusplus } // extern "C" #endif
AntOMP.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <omp.h>

/* Ant-colony optimization for a 48-city TSP whose distance matrix is read
 * from "matrix.txt" (comma-separated rows). Prints the best tour length
 * found after `iter` iterations. */

double dist[48][48];

double aleatorioEntero(int li, int ls);
double aleatorio(void);
int probabilidad(double visi[], double fero[], int vector[], int cities);
void gettingMatrix(void);
void printMatrix(void);

int main(void) {
  int i, j, k, m, cityNow;
  const int cities = 48, ants = 1000, iter = 10000;

  srand(time(NULL));

  /* ant[k] is ant k's tour: cities+1 stops, starting and ending at city 0. */
  int ant[ants][cities + 1];
  double fero[cities][cities], visi[cities][cities], prob[cities][cities];
  double aux, random;
  int vector[cities]; /* vector[j] == 0 marks city j as already visited */
  double feroIter[cities], visiIter[cities];
  double recorrido[ants], best = 10000000; /* tour lengths; best so far */
  double rho = 0.0001, Q = 500; /* pheromone evaporation rate and deposit */

  gettingMatrix();
  // printMatrix();

  /* Initial pheromone and visibility (500/distance, 0 on the diagonal). */
  for (i = 0; i < cities; i++) {
    for (j = 0; j < cities; j++) {
      fero[i][j] = 0.1;
      visi[i][j] = (i != j) ? 500 / dist[i][j] : 0;
    }
  }

  /* ------------------- Probability ------------ */
  /* Row-normalized transition probabilities; only row 0 (the start city)
   * is consulted below — later hops go through probabilidad(). */
  double sumVF;
  for (i = 0; i < cities; i++) {
    sumVF = 0;
    for (j = 0; j < cities; j++)
      sumVF += visi[i][j] * fero[i][j];
    for (j = 0; j < cities; j++)
      prob[i][j] = (visi[i][j] * fero[i][j]) / sumVF;
  }

  /* ------------ Ant solutions --------------- */
  for (m = 0; m < iter; m++) {
    /* BUG FIX: the original cleared only the first `cities` (48) rows of
     * ant[], leaving ants 48..999 partially uninitialized; clear them all. */
    for (k = 0; k < ants; k++)
      for (j = 0; j <= cities; j++)
        ant[k][j] = 0;

    /* Build one tour per ant.
     * NOTE(review): aleatorio() calls rand(), which is not guaranteed
     * thread-safe inside this parallel region — consider rand_r() or a
     * per-thread generator. */
    #pragma omp parallel for private(i, j, aux, random, feroIter, visiIter, vector, cityNow)
    for (k = 0; k < ants; k++) {
      ant[k][0] = 0; /* tour starts at city 0 */
      random = aleatorio();
      aux = 0;
      ant[k][cities] = 0; /* ...and returns to city 0 */
      for (j = 0; j < cities; j++)
        vector[j] = j; /* vector[0] == 0: start city counts as visited */

      /* First hop: roulette selection over prob[0][*]. */
      j = 1;
      do {
        aux += prob[0][j];
        if (random <= aux) {
          ant[k][1] = j;
          vector[j] = 0; /* mark visited */
        }
        j++;
      } while (random > aux && j < cities);

      /* Remaining cities, chosen from the current city's rows. */
      for (i = 2; i < cities; i++) {
        cityNow = ant[k][i - 1];
        for (j = 0; j < cities; j++) {
          feroIter[j] = fero[cityNow][j];
          visiIter[j] = visi[cityNow][j];
        }
        ant[k][i] = probabilidad(visiIter, feroIter, vector, cities);
        vector[ant[k][i]] = 0;
      }
    }

    /* -------------- Evaluate solutions --------------- */
    #pragma omp parallel for private(i)
    for (k = 0; k < ants; k++) {
      recorrido[k] = 0;
      /* BUG FIX: the original iterated to i == cities and read the
       * out-of-bounds entry ant[k][cities+1]; a tour of cities+1 stops has
       * exactly `cities` edges. */
      for (i = 0; i < cities; i++)
        recorrido[k] += dist[ant[k][i]][ant[k][i + 1]];
      /* BUG FIX: the unsynchronized update of `best` was a data race;
       * double-checked critical section keeps the fast path cheap. */
      if (recorrido[k] < best) {
        #pragma omp critical
        {
          if (recorrido[k] < best)
            best = recorrido[k];
        }
      }
    }

    /* ----------------- Pheromone deposit ----------------- */
    #pragma omp parallel for private(i)
    for (k = 0; k < ants; k++) {
      double dep = Q / recorrido[k];
      for (i = 0; i < cities; i++) {
        /* BUG FIX: concurrent += on shared fero[][] raced; use atomics. */
        #pragma omp atomic
        fero[ant[k][i]][ant[k][i + 1]] += dep;
        #pragma omp atomic
        fero[ant[k][i + 1]][ant[k][i]] += dep;
      }
    }

    /* Evaporation, clamped below at 0.01. */
    #pragma omp parallel for private(j)
    for (i = 0; i < cities; i++) {
      for (j = 0; j < cities; j++) {
        fero[i][j] = fero[i][j] * (1 - rho);
        if (fero[i][j] < 0.01)
          fero[i][j] = 0.01;
      }
    }
  } /* end of iterations */

  printf("\nLa menor distancia fue %.lf\n", best);
  return 0;
}

/* NOTE(review): despite the name, this returns li + rand() % ls (the
 * "(ls+1)-1" cancels to ls, so `li` never bounds the upper end) — looks
 * wrong, but the function is unused in this file so the formula is kept.
 * BUG FIX: the original reseeded with srand(time(NULL)) on every call,
 * which makes repeated calls within the same second return the same value;
 * main() already seeds once. */
double aleatorioEntero(int li, int ls) {
  return li + rand() % ((ls + 1) - 1);
}

/* Uniform double in [0, 1]. */
double aleatorio(void) {
  return (double)rand() / (double)RAND_MAX;
}

/* Roulette-wheel selection of the next city, given the current city's
 * visibility and pheromone rows; vector[j] == 0 marks city j as visited.
 * Dumps state and exits on degenerate probabilities (NaN/Inf) or when no
 * city could be selected. Returns the chosen city index.
 * (A stray "#pragma omp private(...)" in the original was not a valid
 * OpenMP directive and has been removed.) */
int probabilidad(double visi[], double fero[], int vector[], int cities) {
  int i, j, city, condicion, contador;
  double sumVF, aux, probRel, number;

  sumVF = 0;
  contador = 0; /* count of near-zero pheromone entries, for the debug dump */
  for (j = 0; j < cities; j++) {
    if (vector[j] != 0)
      sumVF += visi[j] * fero[j];
    if (fero[j] <= 0.000001)
      contador++;
  }
  if (sumVF <= 0) {
    printf("\n ERROR EN SUMA \n");
  }

  number = aleatorio();
  aux = 0;
  city = -1;
  condicion = 0;
  j = 0;
  while (j < cities && condicion == 0) {
    if (vector[j] != 0) {
      probRel = (visi[j] * fero[j]) / sumVF;
      aux += probRel;
      if (aux != aux || aux == INFINITY) { /* NaN or overflow: dump and abort */
        for (i = 0; i < cities; i++) {
          printf("\n visibilidad %.6lf || fero %.6lf || suma %.6lf ||vector %i || %i",
                 visi[i], fero[i], sumVF, vector[i], contador);
        }
        exit(-1);
      }
      if (number <= aux + 0.0001) { /* small epsilon absorbs rounding error */
        city = j;
        condicion = 1;
      }
    }
    j++;
  }
  if (city == -1) {
    printf("NO asigno");
    printf("\n %.8lf - %.8lf iter=%i", aux, number, j - 1);
    exit(-1);
  }
  return city;
}

/* Load the 48x48 distance matrix from "matrix.txt", one comma-separated row
 * per whitespace-delimited token. Hardened vs. the original: fopen() is
 * checked, reads are bounded to the 48x48 array, the read loop keys off
 * fscanf's return value instead of feof(), and the file is closed. */
void gettingMatrix(void) {
  FILE *inputFile = fopen("matrix.txt", "r");
  if (inputFile == NULL) {
    perror("matrix.txt");
    exit(EXIT_FAILURE);
  }
  char help[300], *token;
  int i = 0, j;
  while (i < 48 && fscanf(inputFile, "%299s", help) == 1) {
    token = strtok(help, ",");
    j = 0;
    while (token != NULL && j < 48) {
      dist[i][j] = atof(token);
      token = strtok(NULL, ",");
      j++;
    }
    i++;
  }
  fclose(inputFile);
}

/* Print the full distance matrix (debug helper). */
void printMatrix(void) {
  for (int i = 0; i < 48; i++) {
    for (int j = 0; j < 48; j++) {
      printf("%.lf ", dist[i][j]);
    }
    printf("\n");
  }
}
gbdt.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_BOOSTING_GBDT_H_ #define LIGHTGBM_BOOSTING_GBDT_H_ #include <LightGBM/boosting.h> #include <LightGBM/objective_function.h> #include <LightGBM/prediction_early_stop.h> #include <LightGBM/utils/json11.h> #include <LightGBM/utils/threading.h> #include <string> #include <algorithm> #include <cstdio> #include <fstream> #include <map> #include <memory> #include <mutex> #include <unordered_map> #include <utility> #include <vector> #include "score_updater.hpp" namespace LightGBM { using json11::Json; /*! * \brief GBDT algorithm implementation. including Training, prediction, bagging. */ class GBDT : public GBDTBase { public: /*! * \brief Constructor */ GBDT(); /*! * \brief Destructor */ ~GBDT(); /*! * \brief Initialization logic * \param gbdt_config Config for boosting * \param train_data Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void Init(const Config* gbdt_config, const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Merge model from other boosting object. 
Will insert to the front of current boosting object * \param other */ void MergeFrom(const Boosting* other) override { auto other_gbdt = reinterpret_cast<const GBDT*>(other); // tmp move to other vector auto original_models = std::move(models_); models_ = std::vector<std::unique_ptr<Tree>>(); // push model from other first for (const auto& tree : other_gbdt->models_) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; // push model in current object for (const auto& tree : original_models) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; } void ShuffleModels(int start_iter, int end_iter) override { int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_; start_iter = std::max(0, start_iter); if (end_iter <= 0) { end_iter = total_iter; } end_iter = std::min(total_iter, end_iter); auto original_models = std::move(models_); std::vector<int> indices(total_iter); for (int i = 0; i < total_iter; ++i) { indices[i] = i; } Random tmp_rand(17); for (int i = start_iter; i < end_iter - 1; ++i) { int j = tmp_rand.NextShort(i + 1, end_iter); std::swap(indices[i], indices[j]); } models_ = std::vector<std::unique_ptr<Tree>>(); for (int i = 0; i < total_iter; ++i) { for (int j = 0; j < num_tree_per_iteration_; ++j) { int tree_idx = indices[i] * num_tree_per_iteration_ + j; auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get()))); models_.push_back(std::move(new_tree)); } } } /*! 
* \brief Reset the training data * \param train_data New Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Reset Boosting Config * \param gbdt_config Config for boosting */ void ResetConfig(const Config* gbdt_config) override; /*! * \brief Adding a validation dataset * \param valid_data Validation dataset * \param valid_metrics Metrics for validation dataset */ void AddValidDataset(const Dataset* valid_data, const std::vector<const Metric*>& valid_metrics) override; /*! * \brief Perform a full training procedure * \param snapshot_freq frequence of snapshot * \param model_output_path path of model file */ void Train(int snapshot_freq, const std::string& model_output_path) override; void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override; /*! * \brief Training logic * \param gradients nullptr for using default objective, otherwise use self-defined boosting * \param hessians nullptr for using default objective, otherwise use self-defined boosting * \return True if cannot train any more */ bool TrainOneIter(const score_t* gradients, const score_t* hessians) override; /*! * \brief Rollback one iteration */ void RollbackOneIter() override; /*! * \brief Get current iteration */ int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; } /*! * \brief Can use early stopping for prediction or not * \return True if cannot use early stopping for prediction */ bool NeedAccuratePrediction() const override { if (objective_function_ == nullptr) { return true; } else { return objective_function_->NeedAccuratePrediction(); } } /*! 
* \brief Get evaluation result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return evaluation result */ std::vector<double> GetEvalAt(int data_idx) const override; /*! * \brief Get current training score * \param out_len length of returned score * \return training score */ const double* GetTrainingScore(int64_t* out_len) override; /*! * \brief Get size of prediction at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return The size of prediction */ int64_t GetNumPredictAt(int data_idx) const override { CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size())); data_size_t num_data = train_data_->num_data(); if (data_idx > 0) { num_data = valid_score_updater_[data_idx - 1]->num_data(); } return num_data * num_class_; } /*! * \brief Get prediction result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \param result used to store prediction result, should allocate memory before call this function * \param out_len length of returned score */ void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override; /*! 
* \brief Get number of prediction for one data * \param start_iteration Start index of the iteration to predict * \param num_iteration number of used iterations * \param is_pred_leaf True if predicting leaf index * \param is_pred_contrib True if predicting feature contribution * \return number of prediction */ inline int NumPredictOneRow(int start_iteration, int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override { int num_pred_in_one_row = num_class_; if (is_pred_leaf) { int max_iteration = GetCurrentIteration(); start_iteration = std::max(start_iteration, 0); start_iteration = std::min(start_iteration, max_iteration); if (num_iteration > 0) { num_pred_in_one_row *= static_cast<int>(std::min(max_iteration - start_iteration, num_iteration)); } else { num_pred_in_one_row *= (max_iteration - start_iteration); } } else if (is_pred_contrib) { num_pred_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2); // +1 for 0-based indexing, +1 for baseline } return num_pred_in_one_row; } void PredictRaw(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; void PredictRawByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const override; void Predict(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; void PredictByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const override; void PredictLeafIndex(const double* features, double* output) const override; void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override; void PredictContrib(const double* features, double* output) const override; void PredictContribByMap(const std::unordered_map<int, double>& features, std::vector<std::unordered_map<int, double>>* output) const override; /*! 
* \brief Dump model to json format string * \param start_iteration The model will be saved start from * \param num_iteration Number of iterations that want to dump, -1 means dump all * \param feature_importance_type Type of feature importance, 0: split, 1: gain * \return Json format string of model */ std::string DumpModel(int start_iteration, int num_iteration, int feature_importance_type) const override; /*! * \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \return if-else format codes of model */ std::string ModelToIfElse(int num_iteration) const override; /*! * \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \param filename Filename that want to save to * \return is_finish Is training finished or not */ bool SaveModelToIfElse(int num_iteration, const char* filename) const override; /*! * \brief Save model to file * \param start_iteration The model will be saved start from * \param num_iterations Number of model that want to save, -1 means save all * \param feature_importance_type Type of feature importance, 0: split, 1: gain * \param filename Filename that want to save to * \return is_finish Is training finished or not */ bool SaveModelToFile(int start_iteration, int num_iterations, int feature_importance_type, const char* filename) const override; /*! * \brief Save model to string * \param start_iteration The model will be saved start from * \param num_iterations Number of model that want to save, -1 means save all * \param feature_importance_type Type of feature importance, 0: split, 1: gain * \return Non-empty string if succeeded */ std::string SaveModelToString(int start_iteration, int num_iterations, int feature_importance_type) const override; /*! * \brief Restore from a serialized buffer */ bool LoadModelFromString(const char* buffer, size_t len) override; /*! 
* \brief Calculate feature importances * \param num_iteration Number of model that want to use for feature importance, -1 means use all * \param importance_type: 0 for split, 1 for gain * \return vector of feature_importance */ std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override; /*! * \brief Calculate upper bound value * \return upper bound value */ double GetUpperBoundValue() const override; /*! * \brief Calculate lower bound value * \return lower bound value */ double GetLowerBoundValue() const override; /*! * \brief Get max feature index of this model * \return Max feature index of this model */ inline int MaxFeatureIdx() const override { return max_feature_idx_; } /*! * \brief Get feature names of this model * \return Feature names of this model */ inline std::vector<std::string> FeatureNames() const override { return feature_names_; } /*! * \brief Get index of label column * \return index of label column */ inline int LabelIdx() const override { return label_idx_; } /*! * \brief Get number of weak sub-models * \return Number of weak sub-models */ inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); } /*! * \brief Get number of tree per iteration * \return number of tree per iteration */ inline int NumModelPerIteration() const override { return num_tree_per_iteration_; } /*! 
* \brief Get number of classes * \return Number of classes */ inline int NumberOfClasses() const override { return num_class_; } inline void InitPredict(int start_iteration, int num_iteration, bool is_pred_contrib) override { num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; start_iteration = std::max(start_iteration, 0); start_iteration = std::min(start_iteration, num_iteration_for_pred_); if (num_iteration > 0) { num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_ - start_iteration); } else { num_iteration_for_pred_ = num_iteration_for_pred_ - start_iteration; } start_iteration_for_pred_ = start_iteration; if (is_pred_contrib) { #pragma omp parallel for schedule(static) for (int i = 0; i < static_cast<int>(models_.size()); ++i) { models_[i]->RecomputeMaxDepth(); } } } inline double GetLeafValue(int tree_idx, int leaf_idx) const override { CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size()); CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves()); return models_[tree_idx]->LeafOutput(leaf_idx); } inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override { CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size()); CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves()); models_[tree_idx]->SetLeafOutput(leaf_idx, val); } /*! * \brief Get Type name of this boosting object */ const char* SubModelName() const override { return "tree"; } protected: virtual bool GetIsConstHessian(const ObjectiveFunction* objective_function) { if (objective_function != nullptr) { return objective_function->IsConstantHessian(); } else { return false; } } /*! * \brief Print eval result and check early stopping */ virtual bool EvalAndCheckEarlyStopping(); /*! * \brief reset config for bagging */ void ResetBaggingConfig(const Config* config, bool is_change_dataset); /*! 
* \brief Implement bagging logic * \param iter Current interation */ virtual void Bagging(int iter); virtual data_size_t BaggingHelper(data_size_t start, data_size_t cnt, data_size_t* buffer); data_size_t BalancedBaggingHelper(data_size_t start, data_size_t cnt, data_size_t* buffer); /*! * \brief calculate the object function */ virtual void Boosting(); /*! * \brief updating score after tree was trained * \param tree Trained tree of this iteration * \param cur_tree_id Current tree for multiclass training */ virtual void UpdateScore(const Tree* tree, const int cur_tree_id); /*! * \brief eval results for one metric */ virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const; /*! * \brief Print metric result of current iteration * \param iter Current interation * \return best_msg if met early_stopping */ std::string OutputMetric(int iter); double BoostFromAverage(int class_id, bool update_scorer); /*! \brief current iteration */ int iter_; /*! \brief Pointer to training data */ const Dataset* train_data_; /*! \brief Config of gbdt */ std::unique_ptr<Config> config_; /*! \brief Tree learner, will use this class to learn trees */ std::unique_ptr<TreeLearner> tree_learner_; /*! \brief Objective function */ const ObjectiveFunction* objective_function_; /*! \brief Store and update training data's score */ std::unique_ptr<ScoreUpdater> train_score_updater_; /*! \brief Metrics for training data */ std::vector<const Metric*> training_metrics_; /*! \brief Store and update validation data's scores */ std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_; /*! \brief Metric for validation data */ std::vector<std::vector<const Metric*>> valid_metrics_; /*! \brief Number of rounds for early stopping */ int early_stopping_round_; /*! \brief Only use first metric for early stopping */ bool es_first_metric_only_; /*! \brief Best iteration(s) for early stopping */ std::vector<std::vector<int>> best_iter_; /*! 
\brief Best score(s) for early stopping */ std::vector<std::vector<double>> best_score_; /*! \brief output message of best iteration */ std::vector<std::vector<std::string>> best_msg_; /*! \brief Trained models(trees) */ std::vector<std::unique_ptr<Tree>> models_; /*! \brief Max feature index of training data*/ int max_feature_idx_; /*! \brief First order derivative of training data */ std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> gradients_; /*! \brief Secend order derivative of training data */ std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> hessians_; /*! \brief Store the indices of in-bag data */ std::vector<data_size_t, Common::AlignmentAllocator<data_size_t, kAlignedSize>> bag_data_indices_; /*! \brief Number of in-bag data */ data_size_t bag_data_cnt_; /*! \brief Number of training data */ data_size_t num_data_; /*! \brief Number of trees per iterations */ int num_tree_per_iteration_; /*! \brief Number of class */ int num_class_; /*! \brief Index of label column */ data_size_t label_idx_; /*! \brief number of used model */ int num_iteration_for_pred_; /*! \brief Start iteration of used model */ int start_iteration_for_pred_; /*! \brief Shrinkage rate for one iteration */ double shrinkage_rate_; /*! \brief Number of loaded initial models */ int num_init_iteration_; /*! \brief Feature names */ std::vector<std::string> feature_names_; std::vector<std::string> feature_infos_; std::unique_ptr<Dataset> tmp_subset_; bool is_use_subset_; std::vector<bool> class_need_train_; bool is_constant_hessian_; std::unique_ptr<ObjectiveFunction> loaded_objective_; bool average_output_; bool need_re_bagging_; bool balanced_bagging_; std::string loaded_parameter_; std::vector<int8_t> monotone_constraints_; const int bagging_rand_block_ = 1024; std::vector<Random> bagging_rands_; ParallelPartitionRunner<data_size_t, false> bagging_runner_; Json forced_splits_json_; }; } // namespace LightGBM #endif // LightGBM_BOOSTING_GBDT_H_
spmv_tile.h
#include"common.h"
#include"mmio_highlevel.h"
//#include"mmio.h"
#include"utils.h"
#include"tilespmv_warp.h"
//#include"tilespmv_warp_avx.h"

/*
 * tilespmv: sparse matrix-vector product y += A*x for a tiled matrix.
 * The matrix is partitioned into BLOCK_SIZE x BLOCK_SIZE tiles; each tile is
 * stored in one of several formats (CSR/COO/ELL/HYB/dense/dense-row/dense-col,
 * selected per tile via matrix->Format) and dispatched to the matching
 * warplevel_* kernel. A remainder of rows kept in a separate CSR-like COO
 * structure (coo_new_*) is handled by the second loop.
 *
 * matrix  - tiled matrix descriptor (storage arrays + per-tile metadata)
 * x       - input vector (length >= matrix->n)
 * y       - output vector (length >= matrix->m); accumulated into, caller
 *           is responsible for zeroing it beforehand if desired
 * new_row - number of remainder rows stored in the coo_new_* arrays
 */
void tilespmv(Beidou_Tile_Matrix *matrix, MAT_VAL_TYPE *x, MAT_VAL_TYPE *y, int new_row)
// (int rowA, int colA, int *rowpointerA, int *columnindexA, MAT_VAL_TYPE *valueA,
// int tilemA, int tilen, int numtileA, MAT_PTR_TYPE *tile_ptr_A, int *tile_columnidx, int *tile_nnz, char *Format,
// int *blknnz, unsigned char *csr_ptr, int nnz_temp,
// MAT_VAL_TYPE *Tile_csr_Val, unsigned char *Tile_csr_Col, unsigned char *Tile_csr_Ptr, int *csr_offset, int *csrptr_offset,
// MAT_VAL_TYPE *Tile_coo_Val, unsigned char *Tile_coo_colIdx, unsigned char *Tile_coo_rowIdx, int *coo_offset,
// MAT_VAL_TYPE *Tile_ell_Val, unsigned char *Tile_ell_colIdx, char *blkwidth, int *ell_offset,
// MAT_VAL_TYPE *Tile_hyb_Val, unsigned char *Tile_hyb_ellcolIdx, unsigned char *Tile_hyb_coorowIdx, int *hyb_coocount, int *hyb_offset,
// MAT_VAL_TYPE *Tile_dns_Val, int *dns_offset,
// MAT_VAL_TYPE *Tile_denserow_Val, char *Tile_dnsrow_idx, int * denserowptr, int *dnsrow_offset,
// MAT_VAL_TYPE *Tile_dnscol_Val, char *Tile_dnscol_idx, int *densecolptr, int *dnscol_offset,
// MAT_VAL_TYPE *x, MAT_VAL_TYPE *y)
{
    /* Unpack the matrix descriptor into locals.
       NOTE(review): many of these locals (rowpointer, columnidx, value,
       numtile, the Tile_* arrays, ...) are unused in this function since the
       per-format calls now take 'matrix' directly — candidates for cleanup. */
    int *rowpointer=matrix->rowpointer;
    int *columnidx = matrix->columnidx;
    MAT_VAL_TYPE *value = matrix->value;
    int m = matrix->m;
    int n = matrix->n;
    int tilem = matrix->tilem;
    int tilen = matrix->tilen;
    MAT_PTR_TYPE *tile_ptr = matrix->tile_ptr;
    int numtile = matrix->numtile;
    int *tile_columnidx = matrix->tile_columnidx;
    int *tile_nnz = matrix->tile_nnz;
    char *Format = matrix->Format;
    int *blknnz = matrix->blknnz;
    char *blkwidth = matrix->blkwidth;
    MAT_VAL_TYPE *Tile_csr_Val = matrix->Tile_csr_Val;
    unsigned char *Tile_csr_Col = matrix->Tile_csr_Col;
    unsigned char *Tile_csr_Ptr = matrix->Tile_csr_Ptr;
    MAT_VAL_TYPE *Tile_coo_Val = matrix->Tile_coo_Val;
    unsigned char *Tile_coo_colIdx = matrix->Tile_coo_colIdx;
    unsigned char *Tile_coo_rowIdx = matrix->Tile_coo_rowIdx;
    MAT_VAL_TYPE *Tile_ell_Val = matrix->Tile_ell_Val;
    unsigned char *Tile_ell_colIdx = matrix->Tile_ell_colIdx;
    MAT_VAL_TYPE *Tile_hyb_Val = matrix->Tile_hyb_Val;
    unsigned char *Tile_hyb_ellcolIdx = matrix->Tile_hyb_ellcolIdx;
    unsigned char *Tile_hyb_coorowIdx = matrix->Tile_hyb_coorowIdx;
    MAT_VAL_TYPE *Tile_dns_Val = matrix->Tile_dns_Val;
    MAT_VAL_TYPE *Tile_dnsrow_Val = matrix->Tile_dnsrow_Val;
    char *Tile_dnsrow_idx = matrix->Tile_dnsrow_idx;
    MAT_VAL_TYPE *Tile_dnscol_Val = matrix->Tile_dnscol_Val;
    char *Tile_dnscol_idx = matrix->Tile_dnscol_idx;
    int *denserowptr = matrix->denserowptr;
    int *densecolptr = matrix->densecolptr;
    unsigned char *csr_ptr = matrix->csr_ptr;
    int *hyb_coocount = matrix->hyb_coocount;
    int *csr_offset = matrix->csr_offset;
    int *csrptr_offset = matrix->csrptr_offset;
    int *coo_offset = matrix->coo_offset;
    int *ell_offset = matrix->ell_offset;
    int *hyb_offset = matrix->hyb_offset;
    int *dns_offset = matrix->dns_offset;
    int *dnsrow_offset = matrix->dnsrow_offset;
    int *dnscol_offset = matrix->dnscol_offset;

    /* One parallel iteration per tile row: tile row blki only writes
       y[blki*BLOCK_SIZE .. end), so distinct iterations touch disjoint
       slices of y (assuming the warplevel_* kernels respect that). */
    #pragma omp parallel for
    for (int blki = 0; blki < tilem; blki ++)
    {
        int tilenum_per_row=tile_ptr[blki+1]-tile_ptr[blki];
        /* last tile row may be shorter than BLOCK_SIZE */
        int rowlen= blki==tilem-1 ? m-(tilem-1)*BLOCK_SIZE : BLOCK_SIZE ;
        int start = blki*BLOCK_SIZE;
        int end = blki==tilem-1 ? m : (blki+1)*BLOCK_SIZE ;
        for (int blkj = tile_ptr[blki]; blkj < tile_ptr[blki + 1]; blkj ++)
        {
            /* last tile column may be narrower than BLOCK_SIZE */
            int collen = tile_columnidx[blkj] == tilen-1 ? n - (tilen-1 ) * BLOCK_SIZE : BLOCK_SIZE ;
            int tilennz = tile_nnz[blkj +1] - tile_nnz[blkj];
            char format = Format[blkj];
            /* where this tile's columns start inside x */
            int x_offset = tile_columnidx[blkj] * BLOCK_SIZE;
            /* dispatch on the per-tile storage format */
            switch (format)
            {
                case 0:  /* CSR tile */
                {
                    // warplevel_csr(m, n, tilem, tilen,
                    //               tile_ptr, tile_columnidx, tile_nnz, blki, blkj,
                    //               Tile_csr_Val, Tile_csr_Col, Tile_csr_Ptr,csr_offset, csrptr_offset,
                    //               x,y, x_offset);
                    warplevel_csr(matrix, blki, blkj, csr_offset, csrptr_offset,
                                  x, y, x_offset);
                    break;
                }
                case 1:  /* COO tile */
                {
                    /* NOTE(review): the COO kernel call is commented out, so
                       format-1 tiles currently contribute NOTHING to y —
                       confirm this is intentional (e.g. COO nnz folded into
                       the coo_new_* remainder loop below). */
                    // warplevel_coo(m, n, tilem, tilen,
                    //               tile_ptr, tile_columnidx, tile_nnz, blki, blkj,
                    //               Tile_coo_Val, Tile_coo_colIdx, Tile_coo_rowIdx, coo_offset,
                    //               x,y, x_offset);
                    // warplevel_coo(matrix, blki, blkj, coo_offset,
                    //               x, y, x_offset, BLOCK_SIZE);
                    break;
                }
                case 2:  /* ELL tile */
                {
                    // warplevel_ell(m, n, tilem, tilen,
                    //               tile_ptr, tile_columnidx, tile_nnz, blki, blkj,
                    //               Tile_ell_Val, Tile_ell_colIdx, blkwidth, ell_offset,
                    //               x,y, x_offset);
                    warplevel_ell(matrix, blki, blkj, ell_offset,
                                  x, y, x_offset);
                    break;
                }
                case 3:  /* HYB (ELL + COO overflow) tile */
                {
                    // warplevel_hyb(m, n, tilem, tilen, blknnz,
                    //               tile_ptr, tile_columnidx, tile_nnz, blki, blkj, blkwidth,
                    //               Tile_hyb_Val, Tile_hyb_ellcolIdx, Tile_hyb_coorowIdx, hyb_coocount, hyb_offset,
                    //               x,y, x_offset);
                    warplevel_hyb(matrix, blki, blkj,hyb_coocount, hyb_offset,
                                  x, y, x_offset);
                    break;
                }
                case 4:  /* fully dense tile */
                {
                    // warplevel_dns(m, n, tilem, tilen,
                    //               tile_ptr, tile_columnidx, tile_nnz, blki, blkj,
                    //               Tile_dns_Val, dns_offset,
                    //               x,y, x_offset);
                    warplevel_dns(matrix, blki, blkj, dns_offset,
                                  x, y, x_offset);
                    break;
                }
                case 5:  /* dense-rows tile */
                {
                    // warplevel_dnsrow(m, n, tilem, tilen,
                    //               tile_ptr, tile_columnidx, tile_nnz, blki, blkj,
                    //               Tile_dnsrow_Val, Tile_dnsrow_idx, denserowptr, dnsrow_offset,
                    //               x,y, x_offset);
                    warplevel_dnsrow(matrix, blki, blkj, dnsrow_offset,
                                     x, y, x_offset);
                    break;
                }
                case 6:  /* dense-columns tile */
                {
                    // warplevel_dnscol(m, n, tilem, tilen,
                    //               tile_ptr, tile_columnidx, tile_nnz, blki, blkj,
                    //               Tile_dnscol_Val, Tile_dnscol_idx, densecolptr, dnscol_offset,
                    //               x,y, x_offset);
                    warplevel_dnscol(matrix, blki, blkj, dnscol_offset,
                                     x, y, x_offset);
                    break;
                }
                default:
                    break;
            }
        }
    }

    /* Remainder rows stored in CSR-style coo_new_* arrays.
       NOTE(review): parallel-safe only if coo_new_rowidx contains each row
       at most once; duplicate row indices would make y[rowidx] += sum a
       data race — confirm against the construction of coo_new_*. */
    #pragma omp parallel for
    for (int ri = 0; ri <new_row; ri++)
    {
        int rowidx = matrix->coo_new_rowidx[ri];
        MAT_VAL_TYPE sum = 0;
        // for each nonzero in the row of the block
        // the last row uses nnzlocal
        for (int rj = matrix->coo_new_matrix_ptr[ri]; rj < matrix->coo_new_matrix_ptr[ri +1]; rj++)
        {
            int csrcolidx = matrix->coo_new_matrix_colidx[rj];
            sum += x[csrcolidx] * matrix->coo_new_matrix_value[rj];
        }
        y[rowidx] += sum;
    }
}
array_sections-4.c
/* { dg-do run } */
/* OpenMP target test: two array sections (A[0:10] and p[3:7], where p points
   at A) mapped to the same device storage must alias on the device, and the
   last device write must be the value copied back to the host. */

#include <stdlib.h>

/* Maps A[0:10] to the target device, then inside the region writes A[8]
   twice: once through A and once through the aliasing pointer section p.
   After copy-back, index 8 must hold the later write (999). Aborts if the
   mappings did not alias correctly. */
void
foo ()
{
  int A[30], *p;
  #pragma omp target data map(A[0:10])
  {
    p = &A[0];
    /* p[3:7] covers elements A[3..9]; A[0:10] covers A[0..9] — both
       sections must refer to the same device copy of A. */
    #pragma omp target map(p[3:7]) map(A[0:10])
    {
      A[2] = 777;  /* reachable only through the A mapping */
      A[8] = 777;
      p[8] = 999;  /* overwrites A[8] through the pointer section */
    }
  }
  if (A[2] != 777 || A[8] != 999)
    abort ();
}

int
main ()
{
  foo ();
  return 0;
}
Matrix.h
#ifndef MATRIX_H_ #define MATRIX_H_ #include "../Toolbox.h" #include "../Serializable.h" #include "KTuple.h" #include <string.h> #include <stdio.h> #include <string> #include <sys/mman.h> #include <cmath> #include <type_traits> #ifdef LINUX #include <clocale> #endif #ifdef OSX #include <locale.h> #endif namespace Lazarus { // we are using n x m matrices ! //row major format (AS IT SHOULD BE) //transforms 2-dim coordinates into 1-dim memory position //with offset all 2-dim coordinates start at 1 /* In case indices starts from 1 i : row index j : column index ld : size of a row */ #define IDX2ARRAYCOORDINATE_RM_OFFSET_DIM2(i,j,lr) ( (((i)-1)*(lr)) + ((j)-1) ) /* In case indices starts from 0 (AS IT SHOULD BE!) i,j,ld : Same as above */ #define IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(i,j,lr) ( ((i)*(lr))+(j) ) //column major format (just as CUDA desires) /* In case indices starts from 1 i : row index j : column index ld : size of a column */ #define IDX2ARRAYCOORDINATE_CM_OFFSET_DIM2(i,j,lc) ( (((j)-1)*(lc)) + ((i)-1) ) /* In case indices starts from 0 i,j,ld : Same as above */ #define IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(i,j,lc) ( ((j)*(lc))+(i) ) //analogously for 3 dims #define IDX2ARRAYCOORDINATE_RM_OFFSET_DIM3(i,j,k,lr,lc) ( (((k)-1)*(lr*lc)) + (((i)-1)*(lr)) + ((j)-1) ) #define IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(i,j,k,lr,lc) ( (((k) )*(lr*lc)) + (((i) )*(lr)) + ((j) ) ) #define IDX2ARRAYCOORDINATE_CM_OFFSET_DIM3(i,j,k,lr,lc) ( (((k)-1)*(lr*lc)) + (((j)-1)*(lc)) + ((i)-1) ) #define IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(i,j,k,lr,lc) ( (((k) )*(lr*lc)) + (((j) )*(lc)) + ((i) ) ) //******************* generic base class ************************ class Matrix : public Serializable { public: /*enum MATRIX_DATA_TYPE {MATRIX_DATA_TYPE_DOUBLE, MATRIX_DATA_TYPE_FLOAT, MATRIX_DATA_TYPE_LONG_DOUBLE, MATRIX_DATA_TYPE_INT, MATRIX_DATA_TYPE_LONGLONG, MATRIX_DATA_TYPE_ULONGLONG, MATRIX_DATA_TYPE_LONG, MATRIX_DATA_TYPE_ULONG,MATRIX_DATA_TYPE_CHAR, 
MATRIX_DATA_TYPE_SHORT,MATRIX_DATA_TYPE_USHORT, MATRIX_DATA_TYPE_UINT, MATRIX_DATA_TYPE_UCHAR, MATRIX_DATA_TYPE_ABSTRACT, MATRIX_DATA_TYPE_UNSPECIFIED};*/ enum MATRIX_DATA_ALIGNMENT {MATRIX_DATA_ALIGNMENT_ROW_MAJOR,MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR}; Matrix( enum MATRIX_DATA_ALIGNMENT data_alignment = MATRIX_DATA_ALIGNMENT_ROW_MAJOR, enum SH_DATA_TYPE data_type = SH_DATA_TYPE_FLOAT ) { this->data_alignment = data_alignment; this->data_type = data_type; } virtual ~Matrix() { } CLASS_RTTI(Lazarus::Matrix) protected: enum MATRIX_DATA_ALIGNMENT data_alignment; enum SH_DATA_TYPE data_type; }; //******************* 2 dims ******************************* template<class T> class Matrix2 : public Matrix { public: /** * This class is merely a state container in order to use the [][] * operator on a matrix, i.e. mat[] returns a row pointer object X * which has the desired row >index< temporarily saved. * X[] will finally deliver the value in this row,column element. * */ class Matrix2RowPointer { friend class Matrix2<T>; public: T& operator[](unsigned int col) { if(m_row>=mp_matrix->m_rows) { printf("ERROR: getData row %d not available\n",m_row); return mp_matrix->data[0]; } if(col>=mp_matrix->m_columns) { printf("ERROR: getData column %d not available\n",col); return mp_matrix->data[0]; } if(mp_matrix->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(i,j,this->rows)]; return mp_matrix->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(m_row,col,mp_matrix->m_rows)]; } if(mp_matrix->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(i,j,this->columns)]; return mp_matrix->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(m_row,col,mp_matrix->m_columns)]; } return mp_matrix->data[0]; } const T& operator[](unsigned int col) const { if(m_row>=mp_matrix->m_rows) { printf("ERROR: getData row %d not available\n",m_row); return mp_matrix->data[0]; } 
if(col>=mp_matrix->m_columns) { printf("ERROR: getData column %d not available\n",col); return mp_matrix->data[0]; } if(mp_matrix->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(i,j,this->rows)]; return mp_matrix->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(m_row,col,mp_matrix->m_rows)]; } if(mp_matrix->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(i,j,this->columns)]; return mp_matrix->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(m_row,col,mp_matrix->m_columns)]; } return mp_matrix->data[0]; } private: Matrix2RowPointer(Matrix2<T>* mat, int row) { mp_matrix = mat; m_row = row; } Matrix2<T>* mp_matrix;//external ref unsigned int m_row; }; Matrix2(enum MATRIX_DATA_ALIGNMENT data_alignment = MATRIX_DATA_ALIGNMENT_ROW_MAJOR, bool pinned = false) { this->m_columns = 0; this->m_rows = 0; this->data_alignment = data_alignment; this->data = NULL; this->pinned = pinned; //determine the type if(std::is_same<T,int>::value == true) { this->data_type = SH_DATA_TYPE_INT; } else if(std::is_same<T,unsigned int>::value == true) { this->data_type = SH_DATA_TYPE_UINT; } else if(std::is_same<T,char>::value == true) { this->data_type = SH_DATA_TYPE_CHAR; } else if(std::is_same<T,unsigned char>::value == true) { this->data_type = SH_DATA_TYPE_UCHAR; } else if(std::is_same<T,long int>::value == true) { this->data_type = SH_DATA_TYPE_LONG; } else if(std::is_same<T,long long int>::value == true) { this->data_type = SH_DATA_TYPE_LONGLONG; } else if(std::is_same<T,unsigned long>::value == true) { this->data_type = SH_DATA_TYPE_ULONG; } else if(std::is_same<T,unsigned long long>::value == true) { this->data_type = SH_DATA_TYPE_ULONGLONG; } else if(std::is_same<T,float>::value == true) { this->data_type = SH_DATA_TYPE_FLOAT; } else if(std::is_same<T,double>::value == true) { this->data_type = SH_DATA_TYPE_DOUBLE; } else if(std::is_same<T,long double>::value == 
true) { this->data_type = SH_DATA_TYPE_LONG_DOUBLE; } else if(std::is_same<T,unsigned short>::value == true) { this->data_type = SH_DATA_TYPE_USHORT; } else if(std::is_same<T,short>::value == true) { this->data_type = SH_DATA_TYPE_SHORT; } else { this->data_type = SH_DATA_TYPE_ABSTRACT; } } Matrix2(const Matrix2<T>& matrix) { this->m_columns = 0; this->m_rows = 0; this->data = NULL; this->data_type = matrix.getDataType(); this->data_alignment = matrix.getDataAlignment(); this->pinned = matrix.getPinned(); this->initMatrix(matrix.getRowCount(), matrix.getColumnCount()); //copy data //memcpy(this->data, matrix.getDataPtr(), matrix.getRowCount()* matrix.getColumnCount() * sizeof(T)); //copy data for(unsigned int i=0;i<this->getRowCount();i++) { for(unsigned int j=0;j<this->getColumnCount();j++) { this->setData(i,j, matrix.getData(i,j) ); } } } Matrix2(Matrix2<T>* matrix) { this->m_columns = 0; this->m_rows = 0; this->data = NULL; this->data_type = matrix->getDataType(); this->data_alignment = matrix->getDataAlignment(); this->pinned = matrix->getPinned(); this->initMatrix(matrix->getRowCount(), matrix->getColumnCount()); //copy data //memcpy(this->data, matrix->getDataPtr(), matrix->getRowCount()* matrix->getColumnCount() * sizeof(T)); //copy data for(unsigned int i=0;i<this->getRowCount();i++) { for(unsigned int j=0;j<this->getColumnCount();j++) { this->setData(i,j, matrix->getData(i,j) ); } } } Matrix2(Matrix2<T>* matrix, enum MATRIX_DATA_ALIGNMENT data_alignment, bool pinned = false) { this->m_columns = 0; this->m_rows = 0; this->data = NULL; this->data_type = matrix->getDataType(); this->data_alignment = data_alignment; this->pinned = pinned; this->initMatrix(matrix->getRowCount(), matrix->getColumnCount()); //copy data for(unsigned int i=0;i<this->getRowCount();i++) { for(unsigned int j=0;j<this->getColumnCount();j++) { this->setData(i,j, matrix->getData(i,j) ); } } } virtual ~Matrix2() { if(this->pinned == false) { DELETE_ARRAY_NULL_CHECKING(this->data); } else 
{ if(this->data != NULL) { munlock(this->data,sizeof(T)*this->m_columns*this->m_rows); delete this->data; } } } /** * Keep in mind that the [][] approach induces an overhead since for each * request a new object will be instantiated. **/ inline Matrix2RowPointer operator[](unsigned int row) { return Matrix2RowPointer(this,row); } /** * Keep in mind that the [][] approach induces an overhead since for each * request a new object will be instantiated. **/ inline const Matrix2RowPointer operator[](unsigned int row) const { return Matrix2RowPointer(this,row); } Matrix2<T>& operator =(const Matrix2<T>& matrix) { this->m_columns = 0; this->m_rows = 0; DELETE_ARRAY_NULL_CHECKING(this->data); this->data_type = matrix.getDataType(); this->data_alignment = matrix.getDataAlignment(); this->pinned = matrix.getPinned(); this->initMatrix(matrix.getRowCount(), matrix.getColumnCount()); //copy data //memcpy(this->data, matrix.getDataPtr(), matrix.getRowCount()* matrix.getColumnCount() * sizeof(T)); for(unsigned int i=0;i<this->m_rows;++i) { for(unsigned int j=0;j<this->m_columns;++j) { this->setData(i,j,const_cast<const T&>(matrix.getData(i,j)) ); } } return *this; } Matrix2<T>& operator =(const Matrix2<T>* matrix) { this->m_columns = 0; this->m_rows = 0; DELETE_ARRAY_NULL_CHECKING(this->data); this->data_type = matrix->getDataType(); this->data_alignment = matrix->getDataAlignment(); this->pinned = matrix->getPinned(); this->initMatrix(matrix->getRowCount(), matrix->getColumnCount()); //copy data //memcpy(this->data, matrix->getDataPtr(), matrix->getRowCount()* matrix->getColumnCount() * sizeof(T)); for(unsigned int i=0;i<this->m_rows;++i) { for(unsigned int j=0;j<this->m_columns;++j) { this->setData(i,j,const_cast<const T&>(matrix->getData(i,j)) ); } } return *this; } bool getPinned() const { return this->pinned; } CLASS_RTTI(Lazarus::Matrix2) void serialize() { registerElement<unsigned int>(2); registerElement<enum MATRIX_DATA_ALIGNMENT>(1); registerElement<enum 
SH_DATA_TYPE>(1); registerUCharA(m_rows*m_columns*sizeof(T)); allocateBuffer(); addElement<enum MATRIX_DATA_ALIGNMENT>(data_alignment); addElement<enum SH_DATA_TYPE>(data_type); addUCharA((unsigned char*)data,m_rows*m_columns*sizeof(T)); addElement<unsigned int>(m_rows); addElement<unsigned int>(m_columns); } void deserialize() { //free any data DELETE_NULL_CHECKING(data); m_columns = getElement<unsigned int>(); m_rows = getElement<unsigned int>(); unsigned long long size; data = (T*)getUCharA(size); data_type = getElement<enum SH_DATA_TYPE>(); data_alignment = getElement<enum MATRIX_DATA_ALIGNMENT>(); resetBuffer(); } /** * On a call to this method the matrix will be resized to the specified dimensions. Any previously * allocated internal memory will be deleted, keep in mind that in case of pointers the corresponding * objects won't be deleted.For truly large matrices set 'parallel' true. */ void initMatrix(unsigned int rows, unsigned int columns, bool parallel = false) { if(rows == 0 || columns == 0) { return; } if(this->pinned == false) { // release memory if already allocated DELETE_ARRAY_NULL_CHECKING(this->data); // reserve memory this->data = new T[rows*columns]; this->m_rows = rows; this->m_columns = columns; } else { // release memory if already allocated if(this->data != NULL) { munlock(this->data,sizeof(T)*this->m_columns*this->m_rows); delete this->data; } // reserve memory and lock it this->data = new T[rows*columns]; mlock(this->data,sizeof(T)*rows*columns); this->m_rows = rows; this->m_columns = columns; } resetMatrix(parallel); } /** * On a call to this method the matrix will be resized to the specified dimensions. Any previously * allocated internal memory will be deleted, keep in mind that in case of pointers the corresponding * objects won't be deleted. The matrix will be initialized to all ZERO! If 'large' is set an optimized * subroutine will be used to initialize the entries, use this only for LARGE dims! 
For truly large matrices * set 'parallel' true. */ void initMatrixZERO(unsigned int rows, unsigned int columns, bool large = false, bool parallel = false) { if(rows == 0 || columns == 0) { return; } if(this->pinned == false) { // release memory if already allocated DELETE_ARRAY_NULL_CHECKING(this->data); // reserve memory this->data = new T[rows*columns]; this->m_rows = rows; this->m_columns = columns; } else { // release memory if already allocated if(this->data != NULL) { munlock(this->data,sizeof(T)*this->m_columns*this->m_rows); delete this->data; } // reserve memory and lock it this->data = new T[rows*columns]; mlock(this->data,sizeof(T)*rows*columns); this->m_rows = rows; this->m_columns = columns; } globalSetMatrix(0,large,parallel); } /** * Will iterate over all entries and set them to T(). Use parallel for large matrices! * */ void resetMatrix( bool parallel = false ) { globalSetMatrixVal(T(), parallel); } /** * This is a fast method for setting the initial value, if 'large' is set an optimized * subroutine will be used to initialize the entries! For truly large matrices set 'parallel' * true. The optimization heuristic works best if the amount of columns largely exceeds the amount of * rows or vice versa. * Keep in mind that this routine will simply iterate in byte-wise manner over the underlying * array and set each byte to 'val'!! 
*/ void globalSetMatrix(unsigned char val, bool large = false, bool parallel = false) { if(large == true)//set each row via memcpy if(parallel == true) if(m_columns >= m_rows) Toolbox::setArrayValParallel(val, (unsigned char*)this->data, sizeof(T)*m_rows*m_columns, sizeof(T)*m_columns); else Toolbox::setArrayValParallel(val, (unsigned char*)this->data, sizeof(T)*m_rows*m_columns, sizeof(T)*m_rows); else if(m_columns >= m_rows) Toolbox::setArrayVal(val, (unsigned char*)this->data, sizeof(T)*m_rows*m_columns, sizeof(T)*m_columns); else Toolbox::setArrayVal(val, (unsigned char*)this->data, sizeof(T)*m_rows*m_columns, sizeof(T)*m_rows); else memset(this->data,val,sizeof(T)*m_rows*m_columns); } /** * This is a relatively slow method for setting the initial value, yet it should be used * for non-primitive datatypes or primitive types which exceed char size. * Use parallel for large matrices! */ void globalSetMatrixVal(const T& val, bool parallel = false) { if(parallel == true) for(unsigned int i=0; i < this->m_columns * this->m_rows; ++i) { this->data[i] = val; } else #pragma omp parallel for for(unsigned int i=0; i < this->m_columns * this->m_rows; ++i) { this->data[i] = val; } } /** * Returns the specified submatrix. * */ Matrix2<T>* getSubMatrix(unsigned int start_row, unsigned int start_column, unsigned int rows, unsigned int columns) const { Matrix2<T>* output = new Matrix2<T>(this->data_alignment); output->initMatrix(rows,columns); //copy the data if(this->data_alignment == Matrix::MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { for(unsigned int i=0;i<rows;++i) { for(unsigned int j=0;j<columns;++j) { output->setData(i,j, this->getData(start_row+i,start_column+j)); } } } else { for(unsigned int j=0;j<columns;++j) { for(unsigned int i=0;i<rows;++i) { output->setData(i,j, this->getData(start_row+i,start_column+j)); } } } return output; } /** * Returns a copy of the matrix with added rows and columns, each matrix element is set to val. 
	 *
	 */
	Matrix2<T>* getPaddedMatrix(unsigned int rows, unsigned int columns, const T& val) const
	{
		// Caller owns the returned matrix; the original data sits in the top-left
		// corner, the padding cells are filled with 'val'.
		Matrix2<T>* output = new Matrix2<T>(this->data_alignment);
		output->initMatrix(this->m_rows + rows,this->m_columns + columns);
		output->globalSetMatrixVal(val);

		//copy the data, iterating in memory order for the given alignment
		if(this->data_alignment == Matrix::MATRIX_DATA_ALIGNMENT_ROW_MAJOR)
		{
			for(unsigned int i=0;i<this->m_rows;++i)
			{
				for(unsigned int j=0;j<this->m_columns;++j)
				{
					output->setData(i, j, this->getData(i,j));
				}
			}
		}
		else//column major
		{
			for(unsigned int j=0;j<this->m_columns;++j)
			{
				for(unsigned int i=0;i<this->m_rows;++i)
				{
					output->setData(i, j, this->getData(i,j));
				}
			}
		}

		return output;
	}

	/**
	 * returns a submatrix by removing row 'row' and column 'column' from the current matrix.
	 * Caller owns the returned matrix. Assumes the matrix has at least 2 rows and
	 * 2 columns — TODO confirm callers guarantee this (m_rows-1 underflows otherwise).
	 * */
	Matrix2<T>* removeRowColumn(unsigned int row, unsigned int column)
	{
		Matrix2<T>* output = new Matrix2<T>(this->data_alignment);
		output->initMatrix(m_rows-1,m_columns-1);

		// v_row/v_column shift the destination index by -1 once the removed
		// row/column has been passed.
		int v_row = 0;
		int v_column = 0;

		for(unsigned int a=0; a < m_rows; a++)
		{
			if(a==row)
			{
				v_row = -1;
				continue;
			}

			for(unsigned int b=0; b < m_columns; b++)
			{
				if(b==column)
				{
					v_column = -1;
					continue;
				}

				output->setData(a+v_row, b+v_column, this->getData(a, b));
			}

			// column shift is re-evaluated per row
			v_column = 0;
		}

		return output;
	}

	/**
	 * Calculates the matrix determinant. Returns 0 in case of abstract matrices.
	 * */
	template<typename U>
	U determinant()
	{
		// Default-constructed U (i.e. 0 for arithmetic types) is returned for
		// abstract matrices.
		U det = U();

		if(this->data_type != SH_DATA_TYPE_ABSTRACT)
		{
			det = determinant_<U>(this);
		}
		else
		{
			printf("ERROR: can not calculate determinant of abstract matrix\n");
		}

		return det;
	}

	/**
	 * Calculates the inverse of the matrix. Returns null if matrix is not invertible or of abstract type.
* */ template<typename U> Matrix2<T>* getInverse() { Matrix2<T>* inv = NULL; U det = this->determinant<U>(); //abstract matrix if(this->data_type == SH_DATA_TYPE_ABSTRACT) { printf("ERROR: can not calculate inverse of an abstract matrix\n"); return inv; } //determinant = 0 if(det == (U)0) { printf("ERROR: matrix is not invertible\n"); return inv; } //not quadratic if(m_rows != m_columns) { printf("ERROR: matrix is not invertible\n"); return inv; } for(int i=0;i<m_rows;++i) { for(int j=0;j<m_columns;++j) { Matrix2<T>* minor_ij = this->removeRowColumn(i, j); U det_minor = minor_ij->determinant<U>(); inv->setData(j, i, (int)( std::pow(-1, i+1+j+1)*(det_minor/det) ) );//implicit transpose, thus inverse delete minor_ij; } } return inv; } /** * Scales entries with a, only applicable for non-abstract matrices. Leaves matrix unchanged otherwise. * */ void scale(T a) { //abstract matrix if(this->data_type == SH_DATA_TYPE_ABSTRACT) { printf("ERROR: can not scale an abstract matrix\n"); return; } for(int i=0;i<m_rows;++i) { for(int j=0;j<m_columns;++j) { this->setData(i, j, this->getData(i, j)*a); } } } /** * A pointwise multiplication of the matrix with matrix A. * */ void pointMultiply(Lazarus::Matrix2<T>* A) { //abstract matrix if(this->data_type == SH_DATA_TYPE_ABSTRACT) { printf("ERROR: can not pointwise multiply an abstract matrix\n"); return; } if(this->m_rows != A->getRowCount() || this->m_columns != A->getColumnCount()) { printf("ERROR: can not pointwise multiply with matrix of different size\n"); return; } for(unsigned int i=0;i<m_rows;++i) { for(unsigned int j=0;j<m_columns;++j) { this->setData(i,j, this->getData(i,j) * A->getData(i,j) ); } } } /** * A pointwise multiplication of the matrix with matrix A. 
	 *
	 * Reference overload; behaves like the pointer overload above.
	 */
	void pointMultiply(const Lazarus::Matrix2<T>& A)
	{
		//abstract matrix
		if(this->data_type == SH_DATA_TYPE_ABSTRACT)
		{
			printf("ERROR: can not pointwise multiply an abstract matrix\n");
			return;
		}

		//dimensions must match exactly
		if(this->m_rows != A.getRowCount() || this->m_columns != A.getColumnCount())
		{
			printf("ERROR: can not pointwise multiply with matrix of different size\n");
			return;
		}

		for(unsigned int i=0;i<m_rows;++i)
		{
			for(unsigned int j=0;j<m_columns;++j)
			{
				this->setData(i,j, this->getData(i,j) * A.getData(i,j) );
			}
		}
	}

	/**
	 * A pointwise division of the matrix with matrix A.
	 *
	 * NOTE(review): entries of A equal to zero are not checked — division by
	 * zero semantics depend on T (UB for integral types).
	 * */
	void pointDivide(Lazarus::Matrix2<T>* A)
	{
		//abstract matrix
		if(this->data_type == SH_DATA_TYPE_ABSTRACT)
		{
			printf("ERROR: can not pointwise divide an abstract matrix\n");
			return;
		}

		//dimensions must match exactly
		if(this->m_rows != A->getRowCount() || this->m_columns != A->getColumnCount())
		{
			printf("ERROR: can not pointwise divide with matrix of different size\n");
			return;
		}

		for(unsigned int i=0;i<m_rows;++i)
		{
			for(unsigned int j=0;j<m_columns;++j)
			{
				this->setData(i,j, this->getData(i,j) / A->getData(i,j) );
			}
		}
	}

	/**
	 * A pointwise division of the matrix with matrix A.
	 *
	 * Reference overload; behaves like the pointer overload above.
	 * */
	void pointDivide(const Lazarus::Matrix2<T>& A)
	{
		//abstract matrix
		if(this->data_type == SH_DATA_TYPE_ABSTRACT)
		{
			printf("ERROR: can not pointwise divide an abstract matrix\n");
			return;
		}

		//dimensions must match exactly
		if(this->m_rows != A.getRowCount() || this->m_columns != A.getColumnCount())
		{
			printf("ERROR: can not pointwise divide with matrix of different size\n");
			return;
		}

		for(unsigned int i=0;i<m_rows;++i)
		{
			for(unsigned int j=0;j<m_columns;++j)
			{
				this->setData(i,j, this->getData(i,j) / A.getData(i,j) );
			}
		}
	}

	/**
	 * this should only be used for debug purposes and especially for small matrices, thus we don't care
	 * about the switch within each iteration of the inner most for loop.
	 */
	virtual void printData() const
	{
		// Debug helper: prints the matrix row by row, dispatching on the runtime
		// data_type tag for the printf format.
		// NOTE(review): the *((X*)&getData(i,j)) reinterpretation assumes T really
		// is the tagged type; for mismatched tags this is a strict-aliasing
		// violation — TODO confirm the tag always matches T.
		for(unsigned int i=0;i<m_rows;++i)
		{
			for(unsigned int j=0;j<m_columns;++j)
			{
				switch(this->data_type)
				{
				case SH_DATA_TYPE_DOUBLE:
					printf("%F ", *((double*)&getData(i,j)));
					break;

				case SH_DATA_TYPE_FLOAT:
					printf("%f ", *((float*)&getData(i,j)));
					break;

				case SH_DATA_TYPE_LONG_DOUBLE:
					printf("%Lf ", *((long double*)&getData(i,j)));
					break;

				case SH_DATA_TYPE_INT:
					printf("%d ", *((int*)&getData(i,j)));
					break;

				case SH_DATA_TYPE_LONGLONG:
					printf("%lld ", *((long long int*)&getData(i,j)));
					break;

				case SH_DATA_TYPE_ULONGLONG:
					printf("%llu ", *((unsigned long long*)&getData(i,j)));
					break;

				case SH_DATA_TYPE_LONG:
					printf("%ld ", *(( long int*)&getData(i,j)));
					break;

				case SH_DATA_TYPE_ULONG:
					printf("%lu ", *((unsigned long*)&getData(i,j)));
					break;

				case SH_DATA_TYPE_UINT:
					printf("%u ", *((unsigned int*)&getData(i,j)));
					break;

				case SH_DATA_TYPE_UCHAR:
					printf("%u ", *((unsigned char*)&getData(i,j)));
					break;

				case SH_DATA_TYPE_CHAR:
					printf("%d ", *((char*)&getData(i,j)));
					break;

				case SH_DATA_TYPE_SHORT:
					printf("%hd ", *((short*)&getData(i,j)));
					break;

				case SH_DATA_TYPE_USHORT:
					printf("%hu ", *((unsigned short*)&getData(i,j)));
					break;

				case SH_DATA_TYPE_ABSTRACT:
					printf("ABSTRACT ");
					break;

				case SH_DATA_TYPE_UNSPECIFIED:
					printf(" UNKNOWN ");
					break;
				}
			}

			printf("\n");
		}
	}

	// Bounds-checked element write; out-of-range indices are reported and ignored.
	void setData(unsigned int i, unsigned int j, const T& value)
	{
		if(i>=m_rows)
		{
			printf("ERROR: setData row %d not available\n",i);
			return;
		}

		if(j>=m_columns)
		{
			printf("ERROR: setData column %d not available\n",j);
			return;
		}

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR)
		{
			this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(i,j,this->m_rows)] = value;
		}

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR)
		{
			this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(i,j,this->m_columns)] = value;
		}
	}

	// Bounds-checked element access; out-of-range indices are reported and the
	// first element is returned as a safe fallback reference.
	inline T& getData(unsigned int i, unsigned int j) const
	{
		//T output=0;
		if(i>=m_rows)
		{
			printf("ERROR: getData row %d not available\n",i);
			return this->data[0];
		}

		if(j>=m_columns)
		{
			printf("ERROR: getData column %d not available\n",j);
			return this->data[0];
		}

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR)
		{
			//output = this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(i,j,this->m_rows)];
			return this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(i,j,this->m_rows)];
		}

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR)
		{
			//output = this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(i,j,this->m_columns)];
			return this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(i,j,this->m_columns)];
		}

		return this->data[0];
		//return output;
	}

	// Unchecked element access (no bounds checks, unlike getData).
	T& operator ()(unsigned int& i, unsigned int& j)
	{
		//T output=0;
		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR)
		{
			//output = this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(i,j,this->m_rows)];
			return this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(i,j,this->m_rows)];
		}

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR)
		{
			//output = this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(i,j,this->m_columns)];
			return this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(i,j,this->m_columns)];
		}

		return this->data[0];
		//return output;
	}

	// Returns a pointer to element (i,j); no bounds checks.
	inline T* getDataRef(unsigned int i, unsigned int j)
	{
		T* output=0;

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR)
		{
			output = this->data + IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(i,j,this->m_rows);
		}

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR)
		{
			output = this->data + IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(i,j,this->m_columns);
		}

		return output;
	}

	/*
	 * This method returns a pointer to row i of the underlying data array.
	 * I.e.
one can return a pointer to the i-th row of a row-major matrix */ inline T* getRawDataRow(unsigned int i) { T* output=0; if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { printf("ERROR: can not return row reference as the matrix is in column major format\n"); } if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { output = &(this->data[ i*this->m_columns ] ); } return output; } /* * This method returns a pointer to column i of the underlying data array. * I.e. one can return a pointer to the i-th column of a column-major matrix */ inline T* getRawDataColumn(unsigned int i) { T* output=0; if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { printf("ERROR: can not return column reference as the matrix is in row major format\n"); } if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { output = &(this->data[ i*this->m_rows ] ); } return output; } T* getDataPtr() const { return this->data; } unsigned int getRowCount() const { return this->m_rows; } unsigned int getColumnCount() const { return this->m_columns; } unsigned int rows() const { return this->m_rows; } unsigned int columns() const { return this->m_columns; } void readMatrixFromFile(const std::string& filename) { setlocale(LC_ALL,"C"); FILE* pFile = fopen(filename.c_str(),"r"); std::string s; //read header and init data structures // from now on we assume a correct format of the input file !! 
std::string rows=""; std::string columns=""; readCSVValue(&pFile,&rows); readCSVValue(&pFile,&columns); this->initMatrix( Toolbox::stringToInt( rows ),Toolbox::stringToInt( columns ) ); //load data into matrix for(unsigned int i=0;i<this->m_rows;i++) { for(unsigned int j=0;j<this->m_columns;j++) { readCSVValue(&pFile,&s); //read token #ifdef DEBUG printf("setting number %s \n",s.c_str()); #endif this->setData(i,j, Toolbox::stringToDouble( s ) ); } } fclose(pFile); } void readMatrixFromBinaryFile(const std::string& filename) { FILE* pFile = fopen(filename.c_str(),"rb"); std::string s; //read header and init data structures // from now on we assume a correct format of the input file !! std::string rows=""; std::string columns=""; readCSVValue(&pFile,&rows); readCSVValue(&pFile,&columns); this->initMatrix( Toolbox::stringToInt( rows ),Toolbox::stringToInt( columns ) ); //load data into matrix fread(this->data, this->m_rows*this->m_columns,sizeof(T), pFile); fclose(pFile); } void writeMatrixToFile(const std::string& filename) const { if(data_type == SH_DATA_TYPE_ABSTRACT) { printf("ERROR: can't save abstract matrix in CSV format, use binary export methods\n"); } setlocale(LC_ALL,"C"); FILE* pFile = fopen(filename.c_str(),"w"); std::string s; //header s = Toolbox::intToString( this->m_rows ); fwrite(s.c_str(), s.length(),sizeof(char), pFile); fputc(',',pFile); s = Toolbox::intToString( this->m_columns ); fwrite(s.c_str(), s.length(),sizeof(char), pFile); fputc(';',pFile); fputc('\n',pFile); //data for(unsigned int i=0;i<this->m_rows;i++) { for(unsigned int j=0;j<this->m_columns;j++) { if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { s = Toolbox::doubleToString( this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(i,j,this->m_rows)] ); } if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { s = Toolbox::doubleToString( this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(i,j,this->m_columns)] ); } fwrite(s.c_str(), s.length(),sizeof(char), pFile); 
if(j+1<this->m_columns) fputc(',',pFile); } fputc(';',pFile); fputc('\n',pFile); } fclose(pFile); } void writeMatrixToBinaryFile(const std::string& filename) const { FILE* pFile = fopen(filename.c_str(),"wb"); std::string s; //header s = Toolbox::intToString( this->m_rows ); fwrite(s.c_str(), s.length(),sizeof(char), pFile); fputc(',',pFile); s = Toolbox::intToString( this->m_columns ); fwrite(s.c_str(), s.length(),sizeof(char), pFile); fputc(';',pFile); fputc('\n',pFile); //data fwrite(this->data, this->m_columns*this->m_rows,sizeof(T), pFile); fclose(pFile); } void exportMatrix(const std::string& filename) { if(data_type == SH_DATA_TYPE_ABSTRACT) { printf("ERROR: can't save abstract matrix in CSV format, use binary export methods\n"); } FILE* pFile = fopen(filename.c_str(),"w"); std::string s; for(unsigned int i=0;i<this->m_rows;i++) { for(unsigned int j=0;j<this->m_columns;j++) { s = Toolbox::doubleToString( (double)getData(i,j) ); fwrite(s.c_str(), s.length(),sizeof(char), pFile); if(j+1<this->m_columns) fputc(',',pFile); } fputc(';',pFile); fputc('\n',pFile); } fclose(pFile); } void exportMatrixBinary(const std::string& filename) { FILE* pFile = fopen(filename.c_str(),"wb"); //data fwrite(this->data, this->m_columns*this->m_rows,sizeof(T), pFile); fclose(pFile); } enum MATRIX_DATA_ALIGNMENT getDataAlignment() const { return this->data_alignment; } enum SH_DATA_TYPE getDataType() const { return this->data_type; } protected: unsigned int m_rows; unsigned int m_columns; T* data; bool pinned; // reads a csv-token, returns false if terminated by , and true if terminated by ; or EOF bool readCSVValue(FILE** pFile, std::string* s) { char c[2]; c[1] = '\0'; *s = ""; //reset string //read chars until , ; or EOF found while(1){ c[0] = fgetc(*pFile); if(c[0]==',') return false; if(c[0]==';' || c[0]==EOF) { fgetc(*pFile); //read ahead one char, i.e. skip line break. 
Uncomment this if linebreaks are not being used after ; return true; } #ifdef DEBUG printf("appending %s to %s \n",(char*)&c,(*s).c_str()); #endif s->append(c); } } /** * Helper method for determinant calculation. Only applicable for non-abstract template parameters. * */ template<typename U> U determinant_(Matrix2<T>* mat) { int sum=0; int s; if(mat->getColumnCount()==1 && mat->getRowCount()==1) { //bottom case of recursion. size 1 matrix determinant is itself. return(mat->getData(0, 0)); } for(unsigned int i=0;i<mat->getRowCount();i++) { //finds determinant using row-by-row expansion Matrix2<T>* submat = mat->removeRowColumn(i,0); //submat->printData(); if(i%2==0) { s=1; } else { s=-1; } sum += s * mat->getData(i, 0)*(determinant_<U>(submat)); //printf("%u: %d \n",i,sum); delete submat; } //printf("final %d \n",sum); return(sum); //returns determinant value. once stack is finished, returns final determinant. } }; //******************* 3 dims ******************************* template<class T> class Matrix3 : public Matrix{ public: /** * This class is merely a state container in order to use the [][][] * operator on a matrix, i.e. mat[][] returns a row-column pointer object X * which has the desired row and column >indices< temporarily saved. * X[] will deliver the row pointer. 
* */ class Matrix3RowColumnPointer { friend class Matrix3<T>; public: T& operator[](unsigned int level) { if(m_row>=mp_matrix->m_rows) { printf("ERROR: getData row %d not available\n",m_row); return mp_matrix->data[0]; } if(m_col>=mp_matrix->m_columns) { printf("ERROR: getData column %d not available\n",m_col); return mp_matrix->data[0]; } if(level>=mp_matrix->m_levels) { printf("ERROR: getData level %d not available\n",level); return mp_matrix->data[0]; } if(mp_matrix->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(i,j,k,this->rows,this->columns)]; return mp_matrix->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(m_row,m_col,level,mp_matrix->m_rows,mp_matrix->m_columns)]; } if(mp_matrix->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(i,j,k,this->rows,this->columns)]; return mp_matrix->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(m_row,m_col,level,mp_matrix->m_rows,mp_matrix->m_columns)]; } return mp_matrix->data[0]; } const T& operator[](unsigned int level) const { if(m_row>=mp_matrix->m_rows) { printf("ERROR: getData row %d not available\n",m_row); return mp_matrix->data[0]; } if(m_col>=mp_matrix->m_columns) { printf("ERROR: getData column %d not available\n",m_col); return mp_matrix->data[0]; } if(level>=mp_matrix->m_levels) { printf("ERROR: getData level %d not available\n",level); return mp_matrix->data[0]; } if(mp_matrix->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(i,j,k,this->rows,this->columns)]; return mp_matrix->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(m_row,m_col,level,mp_matrix->m_rows,mp_matrix->m_columns)]; } if(mp_matrix->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(i,j,k,this->rows,this->columns)]; return 
mp_matrix->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(m_row,m_col,level,mp_matrix->m_rows,mp_matrix->m_columns)]; } return mp_matrix->data[0]; } private: Matrix3RowColumnPointer(Matrix3<T>* mat,int row, int column) { mp_matrix = mat; m_col = column; m_row = row; } Matrix3<T>* mp_matrix;//external ref unsigned int m_col; unsigned int m_row; }; /** * This class is merely a state container in order to use the [][][] * operator on a matrix, i.e. mat[][] returns a row-column pointer object X * which has the desired row and column >indices< temporarily saved. * X[] will deliver the row pointer. * */ class Matrix3RowPointer { friend class Matrix3<T>; friend class Matrix3RowColumnPointer; public: Matrix3RowColumnPointer operator[](unsigned int column) { return Matrix3LevelPointer(mp_matrix,m_row,column); } const Matrix3RowColumnPointer operator[](unsigned int column) const { return Matrix3LevelPointer(mp_matrix,m_row,column); } private: Matrix3RowPointer(Matrix3<T>* mat, unsigned int row) { mp_matrix = mat; m_row = row; } Matrix3<T>* mp_matrix;//external ref unsigned int m_row; }; Matrix3(enum MATRIX_DATA_ALIGNMENT data_alignment = MATRIX_DATA_ALIGNMENT_ROW_MAJOR, bool pinned = false) { this->m_columns = 0; this->m_rows = 0; this->m_levels = 0; this->data_alignment = data_alignment; this->data = NULL; this->pinned = pinned; //determine the type if(std::is_same<T,int>::value == true) { this->data_type = SH_DATA_TYPE_INT; } else if(std::is_same<T,unsigned int>::value == true) { this->data_type = SH_DATA_TYPE_UINT; } else if(std::is_same<T,char>::value == true) { this->data_type = SH_DATA_TYPE_CHAR; } else if(std::is_same<T,unsigned char>::value == true) { this->data_type = SH_DATA_TYPE_UCHAR; } else if(std::is_same<T,long int>::value == true) { this->data_type = SH_DATA_TYPE_LONG; } else if(std::is_same<T,long long int>::value == true) { this->data_type = SH_DATA_TYPE_LONGLONG; } else if(std::is_same<T,unsigned long>::value == true) { this->data_type = SH_DATA_TYPE_ULONG; } 
else if(std::is_same<T,unsigned long long>::value == true) { this->data_type = SH_DATA_TYPE_ULONGLONG; } else if(std::is_same<T,float>::value == true) { this->data_type = SH_DATA_TYPE_FLOAT; } else if(std::is_same<T,double>::value == true) { this->data_type = SH_DATA_TYPE_DOUBLE; } else if(std::is_same<T,long double>::value == true) { this->data_type = SH_DATA_TYPE_LONG_DOUBLE; } else if(std::is_same<T,unsigned short>::value == true) { this->data_type = SH_DATA_TYPE_USHORT; } else if(std::is_same<T,short>::value == true) { this->data_type = SH_DATA_TYPE_SHORT; } else { this->data_type = SH_DATA_TYPE_ABSTRACT; } } Matrix3(Matrix3<T>* matrix, enum MATRIX_DATA_ALIGNMENT data_alignment, bool pinned = false) { this->m_columns = 0; this->m_rows = 0; this->m_levels = 0; this->data = NULL; this->data_type = matrix->getDataType(); this->data_alignment = data_alignment; this->pinned = pinned; this->initMatrix(matrix->getRowCount(), matrix->getColumnCount(), matrix->getLevelCount()); //copy data for(unsigned int k=0;k<this->getLevelCount();k++) { for(unsigned int i=0;i<this->getRowCount();i++) { for(unsigned int j=0;j<this->getColumnCount();j++) { this->setData(i,j,k, matrix->getData(i,j,k) ); } } } } Matrix3(Matrix3<T>* matrix) { this->m_columns = 0; this->m_rows = 0; this->m_levels = 0; this->data = NULL; this->data_type = matrix->getDataType(); this->data_alignment = matrix->getDataAlignment(); this->pinned = matrix->getPinned(); this->initMatrix(matrix->getRowCount(), matrix->getColumnCount(), matrix->getLevelCount()); //copy data //memcpy(this->data, matrix->getDataPtr(), matrix->getRowCount()* matrix->getColumnCount() * matrix->getLevelCount() * sizeof(T)); //copy data for(unsigned int k=0;k<this->getLevelCount();k++) { for(unsigned int i=0;i<this->getRowCount();i++) { for(unsigned int j=0;j<this->getColumnCount();j++) { this->setData(i,j,k, matrix->getData(i,j,k) ); } } } } Matrix3(const Matrix3<T>& matrix) { this->m_columns = 0; this->m_rows = 0; this->m_levels = 0; 
this->data = NULL; this->data_type = matrix.getDataType(); this->data_alignment = matrix.getDataAlignment(); this->pinned = matrix.getPinned(); this->initMatrix(matrix.getRowCount(), matrix.getColumnCount(), matrix.getLevelCount()); //copy data //memcpy(this->data, matrix.getDataPtr(), matrix.getRowCount()* matrix.getColumnCount() * matrix.getLevelCount() * sizeof(T)); //copy data for(unsigned int k=0;k<this->getLevelCount();k++) { for(unsigned int i=0;i<this->getRowCount();i++) { for(unsigned int j=0;j<this->getColumnCount();j++) { this->setData(i,j,k, matrix.getData(i,j,k) ); } } } } virtual ~Matrix3() { if(this->pinned == false) { DELETE_ARRAY_NULL_CHECKING(this->data); } else { if(this->data != NULL) { munlock(this->data,sizeof(T)*this->m_columns*this->m_rows*this->m_levels); delete this->data; } } } /** * Keep in mind that the [][] approach induces an overhead since for each * request a new object will be instantiated. **/ inline const Matrix3RowPointer operator[](int row) const { return Matrix3RowPointer(this,row); } Matrix3<T>& operator =(const Matrix3<T>& matrix) { this->m_columns = 0; this->m_rows = 0; this->m_levels = 0; DELETE_ARRAY_NULL_CHECKING(this->data); this->data_type = matrix.getDataType(); this->data_alignment = matrix.getDataAlignment(); this->pinned = matrix.getPinned(); this->initMatrix(matrix.getRowCount(), matrix.getColumnCount(), matrix.getLevelCount()); //copy data //memcpy(this->data, matrix.getDataPtr(), matrix.getRowCount()* matrix.getColumnCount() * matrix.getLevelCount() * sizeof(T)); for(unsigned int k=0;k<this->getLevelCount();k++) { for(unsigned int i=0;i<this->getRowCount();i++) { for(unsigned int j=0;j<this->getColumnCount();j++) { this->setData(i,j,k, const_cast<const T&>(matrix.getData(i,j,k)) ); } } } return *this; } Matrix3<T>& operator =(const Matrix3<T>* matrix) { this->m_columns = 0; this->m_rows = 0; this->m_levels = 0; DELETE_ARRAY_NULL_CHECKING(this->data); this->data_type = matrix->getDataType(); this->data_alignment 
= matrix->getDataAlignment(); this->pinned = matrix->getPinned(); this->initMatrix(matrix->getRowCount(), matrix->getColumnCount(), matrix->getLevelCount()); //copy data //memcpy(this->data, matrix->getDataPtr(), matrix->getRowCount()* matrix->getColumnCount() * matrix->getLevelCount() * sizeof(T)); for(unsigned int k=0;k<this->getLevelCount();k++) { for(unsigned int i=0;i<this->getRowCount();i++) { for(unsigned int j=0;j<this->getColumnCount();j++) { this->setData(i,j,k, const_cast<const T&>(matrix->getData(i,j,k)) ); } } } return *this; } bool getPinned() const { return this->pinned; } CLASS_RTTI(Lazarus::Matrix3) void serialize() { registerElement<unsigned int>(3); registerElement<enum MATRIX_DATA_ALIGNMENT>(1); registerElement<enum SH_DATA_TYPE>(1); registerUCharA(m_rows*m_columns*m_levels*sizeof(T)); allocateBuffer(); addElement<enum MATRIX_DATA_ALIGNMENT>(data_alignment); addElement<enum SH_DATA_TYPE>(data_type); addUCharA((unsigned char*)data,m_rows*m_columns*m_levels*sizeof(T)); addElement<unsigned int>(m_rows); addElement<unsigned int>(m_columns); addElement<unsigned int>(m_levels); } void deserialize() { //free any data DELETE_NULL_CHECKING(data); m_levels = getElement<unsigned int>(); m_columns = getElement<unsigned int>(); m_rows = getElement<unsigned int>(); unsigned long long size; data = (T*)getUCharA(size); data_type = getElement<enum SH_DATA_TYPE>(); data_alignment = getElement<enum MATRIX_DATA_ALIGNMENT>(); resetBuffer(); } /** * On a call to this method the matrix will be resized to the specified dimensions. Any previously * allocated internal memory will be deleted, keep in mind that in case of pointers the corresponding * objects won't be deleted. Another important fact is that this method merely resizes the matrix, * the "slots" won't be initialized to any default value. 
*/ void initMatrix(int rows, int columns, int levels) { if(rows == 0 || columns == 0 || levels == 0) { return; } if(this->pinned == false) { // release memory if already allocated DELETE_ARRAY_NULL_CHECKING(this->data); // reserve memory this->data = new T[rows*columns*levels]; this->m_rows = rows; this->m_columns = columns; this->m_levels = levels; } else { // release memory if already allocated if(this->data != NULL) { munlock(this->data,sizeof(T)*this->m_columns*this->m_rows*this->m_levels); delete this->data; } // reserve memory and lock it this->data = new T[rows*columns*levels]; mlock(this->data,sizeof(T)*rows*columns*levels); this->m_rows = rows; this->m_columns = columns; this->m_levels = levels; } resetMatrix(); } void resetMatrix() { //memset(this->data,0,sizeof(T)*this->m_columns*this->m_rows*this->m_levels); globalSetMatrixVal(T()); } /** * This is a fast method for setting the initial value */ void globalSetMatrix(unsigned char val) { memset(this->data,val,sizeof(T)*this->m_columns*this->m_rows*this->m_levels); } /** * This is a relatively slow method for setting the initial value */ void globalSetMatrixVal(T val) { for(unsigned int i=0; i < this->m_columns * this->m_rows * this->m_levels; ++i) { this->data[i] = val; } } /** * Returns the specified submatrix * */ Matrix3<T>* getSubMatrix(unsigned int start_row, unsigned int start_column, unsigned int start_level, unsigned int rows, unsigned int columns, unsigned int levels) { Matrix3<T>* output = new Matrix3<T>(this->data_alignment); output->initMatrix(rows,columns,levels); //copy the data if(this->data_alignment == Matrix::MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { for(unsigned int k=0;k<levels;++k) { for(unsigned int i=0;i<rows;++i) { for(unsigned int j=0;j<columns;++j) { output->setData(i,j,k, this->getData(start_row+i,start_column+j,start_level+k)); } } } } else { for(unsigned int k=0;k<levels;++k) { for(unsigned int j=0;j<columns;++j) { for(unsigned int i=0;i<rows;++i) { output->setData(i,j,k, 
this->getData(start_row+i,start_column+j,start_level+k)); } } } } return output; } /** * Returns a copy of the matrix with added levels, rows and columns, each matrix element is set to val. * */ Matrix3<T>* getPaddedMatrix(unsigned int levels, unsigned int rows, unsigned int columns, const T& val) { Matrix3<T>* output = new Matrix3<T>(this->data_alignment); output->initMatrix(this->m_rows + rows,this->m_columns + columns, this->m_levels + levels); output->globalSetMatrixVal(val); //copy the data if(this->data_alignment == Matrix::MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { for(unsigned int k=0;k< this->m_levels;++k) { for(unsigned int i=0;i<this->m_rows;++i) { for(unsigned int j=0;j<this->m_columns;++j) { output->setData(i, j, k, this->getData(i,j,k)); } } } } else { for(unsigned int k=0;k< this->m_levels;++k) { for(unsigned int j=0;j<this->m_columns;++j) { for(unsigned int i=0;i<this->m_rows;++i) { output->setData(i, j, k, this->getData(i,j,k)); } } } } return output; } void setData(unsigned int i, unsigned int j, unsigned int k, const T& value) { if(i>=m_rows) { printf("ERROR: setData row %d not available\n",i); return ; } if(j>=m_columns) { printf("ERROR: setData column %d not available\n",j); return ; } if(k>=m_columns) { printf("ERROR: setData level %d not available\n",k); return ; } if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(i,j,k,this->m_rows,this->m_columns)] = value; } if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(i,j,k,this->m_rows,this->m_columns)] = value; } } inline T& getData(unsigned int i, unsigned int j, unsigned int k) const { //T output=0; if(i>=m_rows) { printf("ERROR: getData row %d not available\n",i); return this->data[0]; } if(j>=m_columns) { printf("ERROR: getData column %d not available\n",j); return this->data[0]; } if(k>=m_levels) { printf("ERROR: getData level %d not available\n",k); return this->data[0]; } 
if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(i,j,k,this->rows,this->columns)]; return this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(i,j,k,this->m_rows,this->m_columns)]; } if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(i,j,k,this->rows,this->columns)]; return this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(i,j,k,this->m_rows,this->m_columns)]; } return this->data[0]; //return output; } T& operator () (unsigned int& i, unsigned int& j, unsigned int& k) { //T output=0; if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(i,j,k,this->rows,this->columns)]; return this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(i,j,k,this->m_rows,this->m_columns)]; } if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(i,j,k,this->rows,this->columns)]; return this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(i,j,k,this->m_rows,this->m_columns)]; } return T(); //return output; } inline T* getDataRef(unsigned int i, unsigned int j, unsigned int k) { T* output=0; if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { output = &(this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(i,j,k,this->m_rows,this->m_columns)]); } if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { output = &(this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(i,j,k,this->m_rows,this->m_columns)]); } return output; } /* * This method returns a pointer to row i of the underlying data array. * I.e. one can return a pointer to the i-th row of a row-major matrix, * k determines the slice. 
*/ inline T* getRawDataRow(unsigned int i, unsigned int k) { T* output=0; if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { printf("ERROR: can not return row reference as the matrix is in column major format\n"); } if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { output = &(this->data[ k*this->m_columns*this->m_rows + i*this->m_columns ] ); } return output; } /* * This method returns a pointer to column i of the underlying data array. * I.e. one can return a pointer to the i-th column of a column-major matrix, * k determines the slice. */ inline T* getRawDataColumn(unsigned int i, unsigned int k) { T* output=0; if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { printf("ERROR: can not return column reference as the matrix is in row major format\n"); } if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { output = &(this->data[ k*this->m_columns*this->m_rows + i*this->m_rows ] ); } return output; } T* getDataPtr() const { return this->data; } int getRowCount() const { return this->m_rows; } int getColumnCount() const { return this->m_columns; } int getLevelCount() const { return this->m_levels; } int rows() const { return this->m_rows; } int columns() const { return this->m_columns; } int levels() const { return this->m_levels; } /* * this should only be used for debug purposes and especially for small matrices, thus we don't care * about the switch within each iteration of the inner most for loop. 
*/ void printData() const { for(unsigned int k=0;k<m_levels;++k) { for(unsigned int i=0;i<m_rows;++i) { for(unsigned int j=0;j<m_columns;++j) { switch(this->data_type) { case SH_DATA_TYPE_DOUBLE: printf("%F ", (double)getData(i,j,k)); break; case SH_DATA_TYPE_FLOAT: printf("%f ", (float)getData(i,j,k)); break; case SH_DATA_TYPE_LONG_DOUBLE: printf("%Lf ", (long double)getData(i,j,k)); break; case SH_DATA_TYPE_INT: printf("%d ", (int)getData(i,j,k)); break; case SH_DATA_TYPE_LONGLONG: printf("%lld ", (long long int)getData(i,j,k)); break; case SH_DATA_TYPE_ULONGLONG: printf("%llu ", (unsigned long long)getData(i,j,k)); break; case SH_DATA_TYPE_LONG: printf("%ld ", (long int)getData(i,j,k)); break; case SH_DATA_TYPE_ULONG: printf("%lu ", (unsigned long)getData(i,j,k)); break; case SH_DATA_TYPE_UINT: printf("%u ", (unsigned int)getData(i,j,k)); break; case SH_DATA_TYPE_UCHAR: printf("%c ", (unsigned char)getData(i,j,k)); break; case SH_DATA_TYPE_CHAR: printf("%d ", (char)getData(i,j,k)); break; case SH_DATA_TYPE_SHORT: printf("%hd ", (short)getData(i,j,k)); break; case SH_DATA_TYPE_USHORT: printf("%hu ", (unsigned short)getData(i,j,k)); break; case SH_DATA_TYPE_ABSTRACT: printf("ABSTRACT "); break; case SH_DATA_TYPE_UNSPECIFIED: printf(" UNKNOWN "); break; } } printf("\n"); } printf("\n****************\n"); } } void readMatrixFromFile(const std::string& filename) { setlocale(LC_ALL,"C"); FILE* pFile = fopen(filename.c_str(),"r"); std::string s; //read header and init data structures // from now on we assume a correct format of the input file !! 
std::string rows=""; std::string columns=""; std::string levels=""; readCSVValue(&pFile,&rows); readCSVValue(&pFile,&columns); readCSVValue(&pFile,&levels); this->initMatrix( Toolbox::stringToInt( rows ),Toolbox::stringToInt( columns ),Toolbox::stringToInt( levels ) ); //load data into matrix for(unsigned int k=0;k<this->m_levels;k++) { for(unsigned int i=0;i<this->m_rows;i++) { for(unsigned int j=0;j<this->m_columns;j++) { readCSVValue(&pFile,&s); //read token #ifdef DEBUG printf("setting number %s \n",s.c_str()); #endif this->setData(i,j,k, Toolbox::stringToDouble( s ) ); } } } fclose(pFile); } void readMatrixFromBinaryFile(const std::string& filename) { FILE* pFile = fopen(filename.c_str(),"rb"); std::string s; //read header and init data structures // from now on we assume a correct format of the input file !! std::string rows=""; std::string columns=""; std::string levels=""; readCSVValue(&pFile,&rows); readCSVValue(&pFile,&columns); readCSVValue(&pFile,&levels); this->initMatrix( Toolbox::stringToInt( rows ),Toolbox::stringToInt( columns ),Toolbox::stringToInt( levels ) ); //load data into matrix fread(this->data, this->m_rows*this->m_columns*this->m_levels,sizeof(T), pFile); fclose(pFile); } void writeMatrixToFile(const std::string& filename) const { if(data_type == SH_DATA_TYPE_ABSTRACT) { printf("ERROR: can't save abstract matrix in CSV format, use binary export methods\n"); } setlocale(LC_ALL,"C"); FILE* pFile = fopen(filename.c_str(),"w"); std::string s; //header s = Toolbox::intToString( this->m_rows ); fwrite(s.c_str(), s.length(),sizeof(char), pFile); fputc(',',pFile); s = Toolbox::intToString( this->m_columns ); fwrite(s.c_str(), s.length(),sizeof(char), pFile); fputc(',',pFile); s = Toolbox::intToString( this->m_levels ); fwrite(s.c_str(), s.length(),sizeof(char), pFile); fputc(';',pFile); fputc('\n',pFile); //data for(unsigned int k=0;k<this->m_levels;k++) { for(unsigned int i=0;i<this->m_rows;i++) { for(unsigned int j=0;j<this->m_columns;j++) { 
if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { s = Toolbox::doubleToString( this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(i,j,k,this->m_rows,this->m_columns)] ); } if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { s = Toolbox::doubleToString( this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(i,j,k,this->m_rows,this->m_columns)] ); } fwrite(s.c_str(), s.length(),sizeof(char), pFile); if(j+1<this->columns) fputc(',',pFile); } fputc(';',pFile); fputc('\n',pFile); } } fclose(pFile); } void writeMatrixToBinaryFile(const std::string& filename) const { FILE* pFile = fopen(filename.c_str(),"wb"); std::string s; //header s = Toolbox::intToString( this->m_rows ); fwrite(s.c_str(), s.length(),sizeof(char), pFile); fputc(',',pFile); s = Toolbox::intToString( this->m_columns ); fwrite(s.c_str(), s.length(),sizeof(char), pFile); fputc(',',pFile); s = Toolbox::intToString( this->m_levels ); fwrite(s.c_str(), s.length(),sizeof(char), pFile); fputc(';',pFile); fputc('\n',pFile); //data fwrite(this->data, this->m_columns*this->m_rows*this->m_levels,sizeof(T), pFile); fclose(pFile); } void exportMatrix(const std::string& filename) { if(data_type == SH_DATA_TYPE_ABSTRACT) { printf("ERROR: can't save abstract matrix in CSV format, use binary export methods\n"); } FILE* pFile = fopen(filename.c_str(),"w"); std::string s; for(unsigned int k=0;k<this->m_levels;k++) { for(unsigned int i=0;i<this->m_rows;i++) { for(unsigned int j=0;j<this->m_columns;j++) { s = Toolbox::doubleToString( (double)getData(i,j,k) ); fwrite(s.c_str(), s.length(),sizeof(char), pFile); if(j+1<this->columns) fputc(',',pFile); } fputc(';',pFile); fputc('\n',pFile); } } fclose(pFile); } void exportMatrixBinary(const std::string& filename) { FILE* pFile = fopen(filename.c_str(),"wb"); //data fwrite(this->data, this->m_columns*this->m_rows*this->m_levels,sizeof(T), pFile); fclose(pFile); } enum MATRIX_DATA_ALIGNMENT getDataAlignment() const { return this->data_alignment; } enum 
SH_DATA_TYPE getDataType() const { return this->data_type; } protected: int m_rows; int m_columns; int m_levels; T* data; bool pinned; // reads a csv-token, returns false if terminated by , and true if terminated by ; or EOF bool readCSVValue(FILE** pFile, std::string* s) { char c[2]; c[1] = '\0'; *s = ""; //reset string //read chars until , ; or EOF found while(1){ c[0] = fgetc(*pFile); if(c[0]==',') return false; if(c[0]==';' || c[0]==EOF) { fgetc(*pFile); //read ahead one char, i.e. skip line break. Uncomment this if linebreaks are not being used after ; return true; } #ifdef DEBUG printf("appending %s to %s \n",(char*)&c,(*s).c_str()); #endif s->append(c); } } }; } #endif /* MATRIX_H_ */
cityblock.c
#include <math.h>

/*
 * cbdm - compute the city-block (Manhattan, L1) distance matrix between
 * two point sets.
 *
 * a        : num_rows x num_cols matrix, row-major (one point per row)
 * b        : num_rows x num_cols matrix, row-major (one point per row)
 * r        : output, num_rows x num_rows matrix, row-major;
 *            r[i*num_rows + j] = sum_k |a[i][k] - b[j][k]|
 * num_rows : number of points in each of a and b
 * num_cols : dimensionality of each point
 *
 * Note: the original version declared the accumulator outside the parallel
 * loop and put it in a reduction(+:_r) clause.  That was a data-sharing
 * misuse: the value is a per-(i,j) scratch accumulator, so the reduced
 * total was meaningless and never read.  Declaring the accumulator in the
 * innermost scope makes it naturally private to each iteration and removes
 * the misleading clause; the values stored in r are unchanged.
 */
void cbdm(double *a, double *b, double *r, int num_rows, int num_cols)
{
    #pragma omp parallel for
    for (int i = 0; i < num_rows; i++) {
        for (int j = 0; j < num_rows; j++) {
            double dist = 0.0;  /* L1 distance between row i of a and row j of b */
            for (int k = 0; k < num_cols; k++) {
                dist += fabs(a[i * num_cols + k] - b[j * num_cols + k]);
            }
            r[i * num_rows + j] = dist;
        }
    }
}
csr_matop.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Matrix operation functions for hypre_CSRMatrix class. * *****************************************************************************/ #include "seq_mv.h" #include "csr_matrix.h" /*-------------------------------------------------------------------------- * hypre_CSRMatrixAddFirstPass: * * Performs the first pass needed for Matrix/Matrix addition (C = A + B). * This function: * 1) Computes the row pointer of the resulting matrix C_i * 2) Allocates memory for the matrix C and returns it to the user * * Notes: 1) It can be used safely inside OpenMP parallel regions. * 2) firstrow, lastrow and marker are private variables. * 3) The remaining arguments are shared variables. * 4) twspace (thread workspace) must be allocated outside the * parallel region. * 5) The mapping arrays map_A2C and map_B2C are used when adding * off-diagonal matrices. They can be set to NULL pointer when * adding diagonal matrices. * 6) Assumes that the elements of C_i are initialized to zero. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_CSRMatrixAddFirstPass( HYPRE_Int firstrow, HYPRE_Int lastrow, HYPRE_Int *twspace, HYPRE_Int *marker, HYPRE_Int *map_A2C, HYPRE_Int *map_B2C, hypre_CSRMatrix *A, hypre_CSRMatrix *B, HYPRE_Int nrows_C, HYPRE_Int nnzrows_C, HYPRE_Int ncols_C, HYPRE_Int *rownnz_C, HYPRE_MemoryLocation memory_location_C, HYPRE_Int *C_i, hypre_CSRMatrix **C_ptr ) { HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Int *B_i = hypre_CSRMatrixI(B); HYPRE_Int *B_j = hypre_CSRMatrixJ(B); HYPRE_Int i, ia, ib, ic, iic, ii, i1; HYPRE_Int jcol, jj; HYPRE_Int num_threads = hypre_NumActiveThreads(); HYPRE_Int num_nonzeros; /* Initialize marker array */ for (i = 0; i < ncols_C; i++) { marker[i] = -1; } ii = hypre_GetThreadNum(); num_nonzeros = 0; for (ic = firstrow; ic < lastrow; ic++) { iic = rownnz_C ? rownnz_C[ic] : ic; if (map_A2C) { for (ia = A_i[iic]; ia < A_i[iic + 1]; ia++) { jcol = map_A2C[A_j[ia]]; marker[jcol] = iic; num_nonzeros++; } } else { for (ia = A_i[iic]; ia < A_i[iic + 1]; ia++) { jcol = A_j[ia]; marker[jcol] = iic; num_nonzeros++; } } if (map_B2C) { for (ib = B_i[iic]; ib < B_i[iic + 1]; ib++) { jcol = map_B2C[B_j[ib]]; if (marker[jcol] != iic) { marker[jcol] = iic; num_nonzeros++; } } } else { for (ib = B_i[iic]; ib < B_i[iic + 1]; ib++) { jcol = B_j[ib]; if (marker[jcol] != iic) { marker[jcol] = iic; num_nonzeros++; } } } C_i[iic + 1] = num_nonzeros; } twspace[ii] = num_nonzeros; #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* Correct C_i - phase 1 */ if (ii) { jj = twspace[0]; for (i1 = 1; i1 < ii; i1++) { jj += twspace[i1]; } for (ic = firstrow; ic < lastrow; ic++) { iic = rownnz_C ? 
rownnz_C[ic] : ic; C_i[iic + 1] += jj; } } else { num_nonzeros = 0; for (i1 = 0; i1 < num_threads; i1++) { num_nonzeros += twspace[i1]; } *C_ptr = hypre_CSRMatrixCreate(nrows_C, ncols_C, num_nonzeros); hypre_CSRMatrixI(*C_ptr) = C_i; hypre_CSRMatrixRownnz(*C_ptr) = rownnz_C; hypre_CSRMatrixNumRownnz(*C_ptr) = nnzrows_C; hypre_CSRMatrixInitialize_v2(*C_ptr, 0, memory_location_C); } /* Correct C_i - phase 2 */ if (rownnz_C != NULL) { #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (ic = firstrow; ic < (lastrow - 1); ic++) { for (iic = rownnz_C[ic] + 1; iic < rownnz_C[ic + 1]; iic++) { hypre_assert(C_i[iic + 1] == 0); C_i[iic + 1] = C_i[rownnz_C[ic] + 1]; } } if (ii < (num_threads - 1)) { for (iic = rownnz_C[lastrow - 1] + 1; iic < rownnz_C[lastrow]; iic++) { hypre_assert(C_i[iic + 1] == 0); C_i[iic + 1] = C_i[rownnz_C[lastrow - 1] + 1]; } } else { for (iic = rownnz_C[lastrow - 1] + 1; iic < nrows_C; iic++) { hypre_assert(C_i[iic + 1] == 0); C_i[iic + 1] = C_i[rownnz_C[lastrow - 1] + 1]; } } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif #ifdef HYPRE_DEBUG if (!ii) { for (i = 0; i < nrows_C; i++) { hypre_assert(C_i[i] <= C_i[i + 1]); hypre_assert(((A_i[i + 1] - A_i[i]) + (B_i[i + 1] - B_i[i])) >= (C_i[i + 1] - C_i[i])); hypre_assert((C_i[i + 1] - C_i[i]) >= (A_i[i + 1] - A_i[i])); hypre_assert((C_i[i + 1] - C_i[i]) >= (B_i[i + 1] - B_i[i])); } hypre_assert((C_i[nrows_C] - C_i[0]) == num_nonzeros); } #endif return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixAddSecondPass: * * Performs the second pass needed for Matrix/Matrix addition (C = A + B). * This function computes C_j and C_data. 
* * Notes: see notes for hypre_CSRMatrixAddFirstPass *--------------------------------------------------------------------------*/ HYPRE_Int hypre_CSRMatrixAddSecondPass( HYPRE_Int firstrow, HYPRE_Int lastrow, HYPRE_Int *twspace, HYPRE_Int *marker, HYPRE_Int *map_A2C, HYPRE_Int *map_B2C, HYPRE_Int *rownnz_C, HYPRE_Complex alpha, HYPRE_Complex beta, hypre_CSRMatrix *A, hypre_CSRMatrix *B, hypre_CSRMatrix *C ) { HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int nnzs_A = hypre_CSRMatrixNumNonzeros(A); HYPRE_Int *B_i = hypre_CSRMatrixI(B); HYPRE_Int *B_j = hypre_CSRMatrixJ(B); HYPRE_Complex *B_data = hypre_CSRMatrixData(B); HYPRE_Int nnzs_B = hypre_CSRMatrixNumNonzeros(B); HYPRE_Int *C_i = hypre_CSRMatrixI(C); HYPRE_Int *C_j = hypre_CSRMatrixJ(C); HYPRE_Complex *C_data = hypre_CSRMatrixData(C); HYPRE_Int ncols_C = hypre_CSRMatrixNumCols(C); HYPRE_Int ia, ib, ic, iic; HYPRE_Int jcol, pos; hypre_assert(( map_A2C && map_B2C) || (!map_A2C && !map_B2C) || ( map_A2C && (nnzs_B == 0)) || ( map_B2C && (nnzs_A == 0))); /* Initialize marker vector */ for (ia = 0; ia < ncols_C; ia++) { marker[ia] = -1; } pos = C_i[rownnz_C ? rownnz_C[firstrow] : firstrow]; if ((map_A2C && map_B2C) || ( map_A2C && (nnzs_B == 0)) || ( map_B2C && (nnzs_A == 0))) { for (ic = firstrow; ic < lastrow; ic++) { iic = rownnz_C ? rownnz_C[ic] : ic; for (ia = A_i[iic]; ia < A_i[iic + 1]; ia++) { jcol = map_A2C[A_j[ia]]; C_j[pos] = jcol; C_data[pos] = alpha * A_data[ia]; marker[jcol] = pos; pos++; } for (ib = B_i[iic]; ib < B_i[iic + 1]; ib++) { jcol = map_B2C[B_j[ib]]; if (marker[jcol] < C_i[iic]) { C_j[pos] = jcol; C_data[pos] = beta * B_data[ib]; marker[jcol] = pos; pos++; } else { hypre_assert(C_j[marker[jcol]] == jcol); C_data[marker[jcol]] += beta * B_data[ib]; } } hypre_assert(pos == C_i[iic + 1]); } /* end for loop */ } else { for (ic = firstrow; ic < lastrow; ic++) { iic = rownnz_C ? 
rownnz_C[ic] : ic; for (ia = A_i[iic]; ia < A_i[iic + 1]; ia++) { jcol = A_j[ia]; C_j[pos] = jcol; C_data[pos] = alpha * A_data[ia]; marker[jcol] = pos; pos++; } for (ib = B_i[iic]; ib < B_i[iic + 1]; ib++) { jcol = B_j[ib]; if (marker[jcol] < C_i[iic]) { C_j[pos] = jcol; C_data[pos] = beta * B_data[ib]; marker[jcol] = pos; pos++; } else { hypre_assert(C_j[marker[jcol]] == jcol); C_data[marker[jcol]] += beta * B_data[ib]; } } hypre_assert(pos == C_i[iic + 1]); } /* end for loop */ } hypre_assert(pos == C_i[rownnz_C ? rownnz_C[lastrow - 1] + 1 : lastrow]); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixAdd: * * Adds two CSR Matrices A and B and returns a CSR Matrix C = alpha*A + beta*B; * * Note: The routine does not check for 0-elements which might be generated * through cancellation of elements in A and B or already contained * in A and B. To remove those, use hypre_CSRMatrixDeleteZeros *--------------------------------------------------------------------------*/ hypre_CSRMatrix* hypre_CSRMatrixAddHost ( HYPRE_Complex alpha, hypre_CSRMatrix *A, HYPRE_Complex beta, hypre_CSRMatrix *B ) { /* CSRMatrix A */ HYPRE_Int *rownnz_A = hypre_CSRMatrixRownnz(A); HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A); HYPRE_Int nnzrows_A = hypre_CSRMatrixNumRownnz(A); HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A); /* CSRMatrix B */ HYPRE_Int *rownnz_B = hypre_CSRMatrixRownnz(B); HYPRE_Int nrows_B = hypre_CSRMatrixNumRows(B); HYPRE_Int nnzrows_B = hypre_CSRMatrixNumRownnz(B); HYPRE_Int ncols_B = hypre_CSRMatrixNumCols(B); /* CSRMatrix C */ hypre_CSRMatrix *C; HYPRE_Int *C_i; HYPRE_Int *rownnz_C; HYPRE_Int nnzrows_C; HYPRE_Int *twspace; HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B); /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case 
of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. * Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); if (nrows_A != nrows_B || ncols_A != ncols_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Warning! incompatible matrix dimensions!\n"); return NULL; } /* Allocate memory */ twspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads(), HYPRE_MEMORY_HOST); C_i = hypre_CTAlloc(HYPRE_Int, nrows_A + 1, memory_location_C); /* Set nonzero rows data of diag_C */ nnzrows_C = nrows_A; if ((nnzrows_A < nrows_A) && (nnzrows_B < nrows_B)) { hypre_MergeOrderedArrays(nnzrows_A, rownnz_A, nnzrows_B, rownnz_B, &nnzrows_C, &rownnz_C); } else { rownnz_C = NULL; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int ns, ne; HYPRE_Int *marker = NULL; hypre_partition1D(nnzrows_C, hypre_NumActiveThreads(), hypre_GetThreadNum(), &ns, &ne); marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST); hypre_CSRMatrixAddFirstPass(ns, ne, twspace, marker, NULL, NULL, A, B, nrows_A, nnzrows_C, ncols_A, rownnz_C, memory_location_C, C_i, &C); hypre_CSRMatrixAddSecondPass(ns, ne, twspace, marker, NULL, NULL, rownnz_C, alpha, beta, A, B, C); hypre_TFree(marker, HYPRE_MEMORY_HOST); } /* end of parallel region */ /* Free memory */ hypre_TFree(twspace, HYPRE_MEMORY_HOST); return C; } hypre_CSRMatrix* hypre_CSRMatrixAdd( HYPRE_Complex alpha, hypre_CSRMatrix *A, HYPRE_Complex beta, hypre_CSRMatrix *B) { hypre_CSRMatrix *C = NULL; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_CSRMatrixMemoryLocation(A), hypre_CSRMatrixMemoryLocation(B) ); if (exec == HYPRE_EXEC_DEVICE) { C = hypre_CSRMatrixAddDevice(alpha, A, beta, B); } else #endif { C = hypre_CSRMatrixAddHost(alpha, A, beta, B); } return C; } #if 0 /*-------------------------------------------------------------------------- * hypre_CSRMatrixBigAdd: * * 
RL: comment it out which was used in ams.c. Should be combined with * above hypre_CSRMatrixAddHost whenever it is needed again * * Adds two CSR Matrices A and B with column indices stored as HYPRE_BigInt * and returns a CSR Matrix C; * * Note: The routine does not check for 0-elements which might be generated * through cancellation of elements in A and B or already contained * in A and B. To remove those, use hypre_CSRMatrixDeleteZeros *--------------------------------------------------------------------------*/ hypre_CSRMatrix * hypre_CSRMatrixBigAdd( hypre_CSRMatrix *A, hypre_CSRMatrix *B ) { HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_BigInt *A_j = hypre_CSRMatrixBigJ(A); HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A); HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A); HYPRE_Complex *B_data = hypre_CSRMatrixData(B); HYPRE_Int *B_i = hypre_CSRMatrixI(B); HYPRE_BigInt *B_j = hypre_CSRMatrixBigJ(B); HYPRE_Int nrows_B = hypre_CSRMatrixNumRows(B); HYPRE_Int ncols_B = hypre_CSRMatrixNumCols(B); hypre_CSRMatrix *C; HYPRE_Complex *C_data; HYPRE_Int *C_i; HYPRE_BigInt *C_j; HYPRE_Int *twspace; HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B); /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. * Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); if (nrows_A != nrows_B || ncols_A != ncols_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Warning! 
incompatible matrix dimensions!\n"); return NULL; } /* Allocate memory */ twspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads(), HYPRE_MEMORY_HOST); C_i = hypre_CTAlloc(HYPRE_Int, nrows_A + 1, memory_location_C); #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int ia, ib, ic, num_nonzeros; HYPRE_Int ns, ne, pos; HYPRE_BigInt jcol; HYPRE_Int ii, num_threads; HYPRE_Int jj; HYPRE_Int *marker = NULL; ii = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); hypre_partition1D(nrows_A, num_threads, ii, &ns, &ne); marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST); for (ia = 0; ia < ncols_A; ia++) { marker[ia] = -1; } /* First pass */ num_nonzeros = 0; for (ic = ns; ic < ne; ic++) { C_i[ic] = num_nonzeros; for (ia = A_i[ic]; ia < A_i[ic + 1]; ia++) { jcol = A_j[ia]; marker[jcol] = ic; num_nonzeros++; } for (ib = B_i[ic]; ib < B_i[ic + 1]; ib++) { jcol = B_j[ib]; if (marker[jcol] != ic) { marker[jcol] = ic; num_nonzeros++; } } C_i[ic + 1] = num_nonzeros; } twspace[ii] = num_nonzeros; #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* Correct row pointer */ if (ii) { jj = twspace[0]; for (ic = 1; ic < ii; ic++) { jj += twspace[ia]; } for (ic = ns; ic < ne; ic++) { C_i[ic] += jj; } } else { C_i[nrows_A] = 0; for (ic = 0; ic < num_threads; ic++) { C_i[nrows_A] += twspace[ic]; } C = hypre_CSRMatrixCreate(nrows_A, ncols_A, C_i[nrows_A]); hypre_CSRMatrixI(C) = C_i; hypre_CSRMatrixInitialize_v2(C, 1, memory_location_C); C_j = hypre_CSRMatrixBigJ(C); C_data = hypre_CSRMatrixData(C); } /* Second pass */ for (ia = 0; ia < ncols_A; ia++) { marker[ia] = -1; } pos = C_i[ns]; for (ic = ns; ic < ne; ic++) { for (ia = A_i[ic]; ia < A_i[ic + 1]; ia++) { jcol = A_j[ia]; C_j[pos] = jcol; C_data[pos] = A_data[ia]; marker[jcol] = pos; pos++; } for (ib = B_i[ic]; ib < B_i[ic + 1]; ib++) { jcol = B_j[ib]; if (marker[jcol] < C_i[ic]) { C_j[pos] = jcol; C_data[pos] = B_data[ib]; marker[jcol] = pos; pos++; } else { C_data[marker[jcol]] += B_data[ib]; } } } 
hypre_TFree(marker, HYPRE_MEMORY_HOST); } /* end of parallel region */ /* Free memory */ hypre_TFree(twspace, HYPRE_MEMORY_HOST); return C; } #endif /*-------------------------------------------------------------------------- * hypre_CSRMatrixMultiplyHost * * Multiplies two CSR Matrices A and B and returns a CSR Matrix C; * * Note: The routine does not check for 0-elements which might be generated * through cancellation of elements in A and B or already contained * in A and B. To remove those, use hypre_CSRMatrixDeleteZeros *--------------------------------------------------------------------------*/ hypre_CSRMatrix* hypre_CSRMatrixMultiplyHost( hypre_CSRMatrix *A, hypre_CSRMatrix *B ) { HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Int *rownnz_A = hypre_CSRMatrixRownnz(A); HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A); HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A); HYPRE_Int nnzrows_A = hypre_CSRMatrixNumRownnz(A); HYPRE_Int num_nnz_A = hypre_CSRMatrixNumNonzeros(A); HYPRE_Complex *B_data = hypre_CSRMatrixData(B); HYPRE_Int *B_i = hypre_CSRMatrixI(B); HYPRE_Int *B_j = hypre_CSRMatrixJ(B); HYPRE_Int nrows_B = hypre_CSRMatrixNumRows(B); HYPRE_Int ncols_B = hypre_CSRMatrixNumCols(B); HYPRE_Int num_nnz_B = hypre_CSRMatrixNumNonzeros(B); HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B); hypre_CSRMatrix *C; HYPRE_Complex *C_data; HYPRE_Int *C_i; HYPRE_Int *C_j; HYPRE_Int ia, ib, ic, ja, jb, num_nonzeros; HYPRE_Int counter; HYPRE_Complex a_entry, b_entry; HYPRE_Int allsquare = 0; HYPRE_Int *twspace; /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. 
* Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); if (ncols_A != nrows_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Warning! incompatible matrix dimensions!\n"); return NULL; } if (nrows_A == ncols_B) { allsquare = 1; } if ((num_nnz_A == 0) || (num_nnz_B == 0)) { C = hypre_CSRMatrixCreate(nrows_A, ncols_B, 0); hypre_CSRMatrixNumRownnz(C) = 0; hypre_CSRMatrixInitialize_v2(C, 0, memory_location_C); return C; } /* Allocate memory */ twspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads(), HYPRE_MEMORY_HOST); C_i = hypre_CTAlloc(HYPRE_Int, nrows_A + 1, memory_location_C); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(ia, ib, ic, ja, jb, num_nonzeros, counter, a_entry, b_entry) #endif { HYPRE_Int *B_marker = NULL; HYPRE_Int ns, ne, ii, jj; HYPRE_Int num_threads; HYPRE_Int i1, iic; ii = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); hypre_partition1D(nnzrows_A, num_threads, ii, &ns, &ne); B_marker = hypre_CTAlloc(HYPRE_Int, ncols_B, HYPRE_MEMORY_HOST); for (ib = 0; ib < ncols_B; ib++) { B_marker[ib] = -1; } HYPRE_ANNOTATE_REGION_BEGIN("%s", "First pass"); /* First pass: compute sizes of C rows. */ num_nonzeros = 0; for (ic = ns; ic < ne; ic++) { if (rownnz_A) { iic = rownnz_A[ic]; C_i[iic] = num_nonzeros; } else { iic = ic; C_i[iic] = num_nonzeros; if (allsquare) { B_marker[iic] = iic; num_nonzeros++; } } for (ia = A_i[iic]; ia < A_i[iic + 1]; ia++) { ja = A_j[ia]; for (ib = B_i[ja]; ib < B_i[ja + 1]; ib++) { jb = B_j[ib]; if (B_marker[jb] != iic) { B_marker[jb] = iic; num_nonzeros++; } } } } twspace[ii] = num_nonzeros; #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* Correct C_i - phase 1 */ if (ii) { jj = twspace[0]; for (i1 = 1; i1 < ii; i1++) { jj += twspace[i1]; } for (i1 = ns; i1 < ne; i1++) { iic = rownnz_A ? 
rownnz_A[i1] : i1; C_i[iic] += jj; } } else { C_i[nrows_A] = 0; for (i1 = 0; i1 < num_threads; i1++) { C_i[nrows_A] += twspace[i1]; } C = hypre_CSRMatrixCreate(nrows_A, ncols_B, C_i[nrows_A]); hypre_CSRMatrixI(C) = C_i; hypre_CSRMatrixInitialize_v2(C, 0, memory_location_C); C_j = hypre_CSRMatrixJ(C); C_data = hypre_CSRMatrixData(C); } /* Correct C_i - phase 2 */ if (rownnz_A != NULL) { #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (ic = ns; ic < (ne - 1); ic++) { for (iic = rownnz_A[ic] + 1; iic < rownnz_A[ic + 1]; iic++) { C_i[iic] = C_i[rownnz_A[ic + 1]]; } } if (ii < (num_threads - 1)) { for (iic = rownnz_A[ne - 1] + 1; iic < rownnz_A[ne]; iic++) { C_i[iic] = C_i[rownnz_A[ne]]; } } else { for (iic = rownnz_A[ne - 1] + 1; iic < nrows_A; iic++) { C_i[iic] = C_i[nrows_A]; } } } /* End of First Pass */ HYPRE_ANNOTATE_REGION_END("%s", "First pass"); #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* Second pass: Fill in C_data and C_j. */ HYPRE_ANNOTATE_REGION_BEGIN("%s", "Second pass"); for (ib = 0; ib < ncols_B; ib++) { B_marker[ib] = -1; } counter = rownnz_A ? 
C_i[rownnz_A[ns]] : C_i[ns]; for (ic = ns; ic < ne; ic++) { if (rownnz_A) { iic = rownnz_A[ic]; } else { iic = ic; if (allsquare) { B_marker[ic] = counter; C_data[counter] = 0; C_j[counter] = ic; counter++; } } for (ia = A_i[iic]; ia < A_i[iic + 1]; ia++) { ja = A_j[ia]; a_entry = A_data[ia]; for (ib = B_i[ja]; ib < B_i[ja + 1]; ib++) { jb = B_j[ib]; b_entry = B_data[ib]; if (B_marker[jb] < C_i[iic]) { B_marker[jb] = counter; C_j[B_marker[jb]] = jb; C_data[B_marker[jb]] = a_entry * b_entry; counter++; } else { C_data[B_marker[jb]] += a_entry * b_entry; } } } } HYPRE_ANNOTATE_REGION_END("%s", "Second pass"); /* End of Second Pass */ hypre_TFree(B_marker, HYPRE_MEMORY_HOST); } /*end parallel region */ #ifdef HYPRE_DEBUG for (ic = 0; ic < nrows_A; ic++) { hypre_assert(C_i[ic] <= C_i[ic + 1]); } #endif // Set rownnz and num_rownnz hypre_CSRMatrixSetRownnz(C); /* Free memory */ hypre_TFree(twspace, HYPRE_MEMORY_HOST); return C; } hypre_CSRMatrix* hypre_CSRMatrixMultiply( hypre_CSRMatrix *A, hypre_CSRMatrix *B) { hypre_CSRMatrix *C = NULL; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_CSRMatrixMemoryLocation(A), hypre_CSRMatrixMemoryLocation(B) ); if (exec == HYPRE_EXEC_DEVICE) { C = hypre_CSRMatrixMultiplyDevice(A, B); } else #endif { C = hypre_CSRMatrixMultiplyHost(A, B); } return C; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixDeleteZeros *--------------------------------------------------------------------------*/ hypre_CSRMatrix * hypre_CSRMatrixDeleteZeros( hypre_CSRMatrix *A, HYPRE_Real tol ) { HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A); HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A); HYPRE_Int num_nonzeros = hypre_CSRMatrixNumNonzeros(A); hypre_CSRMatrix *B; HYPRE_Complex *B_data; HYPRE_Int *B_i; HYPRE_Int *B_j; HYPRE_Int 
zeros; HYPRE_Int i, j; HYPRE_Int pos_A, pos_B; zeros = 0; for (i = 0; i < num_nonzeros; i++) { if (hypre_cabs(A_data[i]) <= tol) { zeros++; } } if (zeros) { B = hypre_CSRMatrixCreate(nrows_A, ncols_A, num_nonzeros - zeros); hypre_CSRMatrixInitialize(B); B_i = hypre_CSRMatrixI(B); B_j = hypre_CSRMatrixJ(B); B_data = hypre_CSRMatrixData(B); B_i[0] = 0; pos_A = pos_B = 0; for (i = 0; i < nrows_A; i++) { for (j = A_i[i]; j < A_i[i + 1]; j++) { if (hypre_cabs(A_data[j]) <= tol) { pos_A++; } else { B_data[pos_B] = A_data[pos_A]; B_j[pos_B] = A_j[pos_A]; pos_B++; pos_A++; } } B_i[i + 1] = pos_B; } return B; } else { return NULL; } } /****************************************************************************** * * Finds transpose of a hypre_CSRMatrix * *****************************************************************************/ /** * idx = idx2*dim1 + idx1 * -> ret = idx1*dim2 + idx2 * = (idx%dim1)*dim2 + idx/dim1 */ static inline HYPRE_Int transpose_idx (HYPRE_Int idx, HYPRE_Int dim1, HYPRE_Int dim2) { return idx % dim1 * dim2 + idx / dim1; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixTransposeHost *--------------------------------------------------------------------------*/ HYPRE_Int hypre_CSRMatrixTransposeHost(hypre_CSRMatrix *A, hypre_CSRMatrix **AT, HYPRE_Int data) { HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Int *rownnz_A = hypre_CSRMatrixRownnz(A); HYPRE_Int nnzrows_A = hypre_CSRMatrixNumRownnz(A); HYPRE_Int num_rows_A = hypre_CSRMatrixNumRows(A); HYPRE_Int num_cols_A = hypre_CSRMatrixNumCols(A); HYPRE_Int num_nnzs_A = hypre_CSRMatrixNumNonzeros(A); HYPRE_MemoryLocation memory_location = hypre_CSRMatrixMemoryLocation(A); HYPRE_Complex *AT_data; HYPRE_Int *AT_j; HYPRE_Int num_rows_AT; HYPRE_Int num_cols_AT; HYPRE_Int num_nnzs_AT; HYPRE_Int max_col; HYPRE_Int i, j; 
/*-------------------------------------------------------------- * First, ascertain that num_cols and num_nonzeros has been set. * If not, set them. *--------------------------------------------------------------*/ HYPRE_ANNOTATE_FUNC_BEGIN; if (!num_nnzs_A) { num_nnzs_A = A_i[num_rows_A]; } if (num_rows_A && num_nnzs_A && ! num_cols_A) { max_col = -1; for (i = 0; i < num_rows_A; ++i) { for (j = A_i[i]; j < A_i[i + 1]; j++) { if (A_j[j] > max_col) { max_col = A_j[j]; } } } num_cols_A = max_col + 1; } num_rows_AT = num_cols_A; num_cols_AT = num_rows_A; num_nnzs_AT = num_nnzs_A; *AT = hypre_CSRMatrixCreate(num_rows_AT, num_cols_AT, num_nnzs_AT); hypre_CSRMatrixMemoryLocation(*AT) = memory_location; if (num_cols_A == 0) { // JSP: parallel counting sorting breaks down // when A has no columns hypre_CSRMatrixInitialize(*AT); HYPRE_ANNOTATE_FUNC_END; return hypre_error_flag; } AT_j = hypre_CTAlloc(HYPRE_Int, num_nnzs_AT, memory_location); hypre_CSRMatrixJ(*AT) = AT_j; if (data) { AT_data = hypre_CTAlloc(HYPRE_Complex, num_nnzs_AT, memory_location); hypre_CSRMatrixData(*AT) = AT_data; } /*----------------------------------------------------------------- * Parallel count sort *-----------------------------------------------------------------*/ HYPRE_Int *bucket = hypre_CTAlloc(HYPRE_Int, (num_cols_A + 1) * hypre_NumThreads(), HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int ii, num_threads, ns, ne; HYPRE_Int i, j, j0, j1, ir; HYPRE_Int idx, offset; HYPRE_Int transpose_i; HYPRE_Int transpose_i_minus_1; HYPRE_Int transpose_i0; HYPRE_Int transpose_j0; HYPRE_Int transpose_j1; ii = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); hypre_partition1D(nnzrows_A, num_threads, ii, &ns, &ne); /*----------------------------------------------------------------- * Count the number of entries that will go into each bucket * bucket is used as HYPRE_Int[num_threads][num_colsA] 2D array 
*-----------------------------------------------------------------*/ if (rownnz_A == NULL) { for (j = A_i[ns]; j < A_i[ne]; ++j) { bucket[ii * num_cols_A + A_j[j]]++; } } else { for (i = ns; i < ne; i++) { ir = rownnz_A[i]; for (j = A_i[ir]; j < A_i[ir + 1]; ++j) { bucket[ii * num_cols_A + A_j[j]]++; } } } /*----------------------------------------------------------------- * Parallel prefix sum of bucket with length num_colsA * num_threads * accessed as if it is transposed as HYPRE_Int[num_colsA][num_threads] *-----------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i = ii * num_cols_A + 1; i < (ii + 1)*num_cols_A; ++i) { transpose_i = transpose_idx(i, num_threads, num_cols_A); transpose_i_minus_1 = transpose_idx(i - 1, num_threads, num_cols_A); bucket[transpose_i] += bucket[transpose_i_minus_1]; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #pragma omp master #endif { for (i = 1; i < num_threads; ++i) { j0 = num_cols_A * i - 1; j1 = num_cols_A * (i + 1) - 1; transpose_j0 = transpose_idx(j0, num_threads, num_cols_A); transpose_j1 = transpose_idx(j1, num_threads, num_cols_A); bucket[transpose_j1] += bucket[transpose_j0]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii > 0) { transpose_i0 = transpose_idx(num_cols_A * ii - 1, num_threads, num_cols_A); offset = bucket[transpose_i0]; for (i = ii * num_cols_A; i < (ii + 1)*num_cols_A - 1; ++i) { transpose_i = transpose_idx(i, num_threads, num_cols_A); bucket[transpose_i] += offset; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /*---------------------------------------------------------------- * Load the data and column numbers of AT *----------------------------------------------------------------*/ if (data) { for (i = ne - 1; i >= ns; --i) { ir = rownnz_A ? 
rownnz_A[i] : i;

            /* Placement phase of the counting sort: walk row ir backwards;
               decrementing the bucket counter for each column yields the
               destination slot of this entry in the transposed matrix.
               Values and pattern are both copied in this branch. */
            for (j = A_i[ir + 1] - 1; j >= A_i[ir]; --j)
            {
               idx = A_j[j];
               --bucket[ii * num_cols_A + idx];
               offset = bucket[ii * num_cols_A + idx];

               AT_data[offset] = A_data[j];
               AT_j[offset] = ir;
            }
         }
      }
      else
      {
         /* Pattern-only variant: same placement, no value array to fill. */
         for (i = ne - 1; i >= ns; --i)
         {
            ir = rownnz_A ? rownnz_A[i] : i;
            for (j = A_i[ir + 1] - 1; j >= A_i[ir]; --j)
            {
               idx = A_j[j];
               --bucket[ii * num_cols_A + idx];
               offset = bucket[ii * num_cols_A + idx];

               AT_j[offset] = ir;
            }
         }
      }
   } /* end parallel region */

   /* After placement, bucket[0..num_cols_A-1] holds the row starts of AT;
      copy it into AT's row-pointer array and pin the final sentinel. */
   hypre_CSRMatrixI(*AT) = hypre_TAlloc(HYPRE_Int, num_cols_A + 1, memory_location);
   hypre_TMemcpy(hypre_CSRMatrixI(*AT), bucket, HYPRE_Int, num_cols_A + 1,
                 memory_location, HYPRE_MEMORY_HOST);
   hypre_CSRMatrixI(*AT)[num_cols_A] = num_nnzs_A;
   hypre_TFree(bucket, HYPRE_MEMORY_HOST);

   // Set rownnz and num_rownnz
   /* Only recompute AT's rownnz when A itself had empty rows
      (NumRownnz < NumRows); otherwise the default is already correct. */
   if (hypre_CSRMatrixNumRownnz(A) < num_rows_A)
   {
      hypre_CSRMatrixSetRownnz(*AT);
   }
   HYPRE_ANNOTATE_FUNC_END;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixTranspose
 *
 * Dispatch wrapper: transposes A into *AT, copying values when data != 0.
 * Routes to the device implementation when A's memory location resolves to
 * device execution, otherwise to the host implementation above.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_CSRMatrixTranspose(hypre_CSRMatrix  *A,
                         hypre_CSRMatrix **AT,
                         HYPRE_Int         data)
{
   HYPRE_Int ierr = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      ierr = hypre_CSRMatrixTransposeDevice(A, AT, data);
   }
   else
#endif
   {
      ierr = hypre_CSRMatrixTransposeHost(A, AT, data);
   }

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixSplit
 *--------------------------------------------------------------------------*/

/* RL: TODO add memory locations */
HYPRE_Int
hypre_CSRMatrixSplit(hypre_CSRMatrix  *Bs_ext,
                     HYPRE_BigInt      first_col_diag_B,
                     HYPRE_BigInt      last_col_diag_B,
                     HYPRE_Int         num_cols_offd_B,
                     HYPRE_BigInt     *col_map_offd_B,
                     HYPRE_Int        *num_cols_offd_C_ptr,
                     HYPRE_BigInt    **col_map_offd_C_ptr,
                     hypre_CSRMatrix
**Bext_diag_ptr, hypre_CSRMatrix **Bext_offd_ptr)
{
   /* Splits Bs_ext (column indices stored as BigJ, i.e. global columns)
      into a "diag" part (columns in [first_col_diag_B, last_col_diag_B],
      shifted to local indices) and an "offd" part (all other columns,
      remapped through a newly built col_map_offd_C).  Runs as a single
      OpenMP parallel region with barrier-separated phases. */
   HYPRE_Complex *Bs_ext_data = hypre_CSRMatrixData(Bs_ext);
   HYPRE_Int     *Bs_ext_i    = hypre_CSRMatrixI(Bs_ext);
   HYPRE_BigInt  *Bs_ext_j    = hypre_CSRMatrixBigJ(Bs_ext);
   HYPRE_Int      num_rows_Bext = hypre_CSRMatrixNumRows(Bs_ext);
   HYPRE_Int      B_ext_diag_size = 0;
   HYPRE_Int      B_ext_offd_size = 0;
   HYPRE_Int     *B_ext_diag_i = NULL;
   HYPRE_Int     *B_ext_diag_j = NULL;
   HYPRE_Complex *B_ext_diag_data = NULL;
   HYPRE_Int     *B_ext_offd_i = NULL;
   HYPRE_Int     *B_ext_offd_j = NULL;
   HYPRE_Complex *B_ext_offd_data = NULL;
   HYPRE_Int     *my_diag_array;           /* per-thread diag entry counts */
   HYPRE_Int     *my_offd_array;           /* per-thread offd entry counts */
   HYPRE_BigInt  *temp;
   HYPRE_Int      max_num_threads;
   HYPRE_Int      cnt = 0;
   hypre_CSRMatrix *Bext_diag = NULL;
   hypre_CSRMatrix *Bext_offd = NULL;
   HYPRE_BigInt  *col_map_offd_C = NULL;
   HYPRE_Int      num_cols_offd_C = 0;

   B_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_Bext + 1, HYPRE_MEMORY_HOST);
   B_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_Bext + 1, HYPRE_MEMORY_HOST);
   max_num_threads = hypre_NumThreads();
   my_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
   my_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel
#endif
   {
      HYPRE_Int ns, ne, ii, num_threads;
      HYPRE_Int i1, i, j;
      HYPRE_Int my_offd_size, my_diag_size;
      HYPRE_Int cnt_offd, cnt_diag;

      ii = hypre_GetThreadNum();
      num_threads = hypre_NumActiveThreads();
      hypre_partition1D(num_rows_Bext, num_threads, ii, &ns, &ne);

      /* Phase 1: each thread counts diag/offd entries in its row range and
         records per-row running offsets (local to the thread for now). */
      my_diag_size = 0;
      my_offd_size = 0;
      for (i = ns; i < ne; i++)
      {
         B_ext_diag_i[i] = my_diag_size;
         B_ext_offd_i[i] = my_offd_size;
         for (j = Bs_ext_i[i]; j < Bs_ext_i[i + 1]; j++)
         {
            if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
            {
               my_offd_size++;
            }
            else
            {
               my_diag_size++;
            }
         }
      }
      my_diag_array[ii] = my_diag_size;
      my_offd_array[ii] = my_offd_size;

#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif

      /* Phase 2: turn the per-thread local offsets into global offsets.
         Thread 0 also computes totals and performs all allocations. */
      if (ii)
      {
         my_diag_size = my_diag_array[0];
         my_offd_size = my_offd_array[0];
         for (i1 = 1; i1 < ii; i1++)
         {
            my_diag_size += my_diag_array[i1];
            my_offd_size += my_offd_array[i1];
         }

         for (i1 = ns; i1 < ne; i1++)
         {
            B_ext_diag_i[i1] += my_diag_size;
            B_ext_offd_i[i1] += my_offd_size;
         }
      }
      else
      {
         B_ext_diag_size = 0;
         B_ext_offd_size = 0;
         for (i1 = 0; i1 < num_threads; i1++)
         {
            B_ext_diag_size += my_diag_array[i1];
            B_ext_offd_size += my_offd_array[i1];
         }
         B_ext_diag_i[num_rows_Bext] = B_ext_diag_size;
         B_ext_offd_i[num_rows_Bext] = B_ext_offd_size;

         if (B_ext_diag_size)
         {
            B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST);
            B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST);
         }

         if (B_ext_offd_size)
         {
            B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST);
            B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST);
         }

         /* temp collects all global offd columns plus col_map_offd_B; it is
            later sorted/uniqued to build col_map_offd_C. */
         if (B_ext_offd_size || num_cols_offd_B)
         {
            temp = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size + num_cols_offd_B, HYPRE_MEMORY_HOST);
         }
      }

#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif

      /* Phase 3: fill the diag/offd arrays using the global offsets.
         Offd keeps the global column (temporarily) in both temp and
         B_ext_offd_j; diag columns are shifted to local indices. */
      cnt_offd = B_ext_offd_i[ns];
      cnt_diag = B_ext_diag_i[ns];
      for (i = ns; i < ne; i++)
      {
         for (j = Bs_ext_i[i]; j < Bs_ext_i[i + 1]; j++)
         {
            if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
            {
               temp[cnt_offd] = Bs_ext_j[j];
               B_ext_offd_j[cnt_offd] = Bs_ext_j[j];
               B_ext_offd_data[cnt_offd++] = Bs_ext_data[j];
            }
            else
            {
               B_ext_diag_j[cnt_diag] = Bs_ext_j[j] - first_col_diag_B;
               B_ext_diag_data[cnt_diag++] = Bs_ext_data[j];
            }
         }
      }

      /* This computes the mappings */
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif

      /* Phase 4 (thread 0 only): sort and unique the collected global
         columns to obtain col_map_offd_C. */
      if (ii == 0)
      {
         cnt = 0;
         if (B_ext_offd_size || num_cols_offd_B)
         {
            cnt = B_ext_offd_size;
            for (i = 0; i < num_cols_offd_B; i++)
            {
               temp[cnt++] = col_map_offd_B[i];
            }

            if (cnt)
            {
               hypre_BigQsort0(temp, 0, cnt - 1);
               num_cols_offd_C = 1;
               HYPRE_BigInt value = temp[0];
               /* in-place unique: compact distinct values to temp's front */
               for (i = 1; i < cnt; i++)
               {
                  if (temp[i] > value)
                  {
                     value = temp[i];
                     temp[num_cols_offd_C++] = value;
                  }
               }
            }

            if (num_cols_offd_C)
            {
               col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST);
            }

            for (i = 0; i < num_cols_offd_C; i++)
            {
               col_map_offd_C[i] = temp[i];
            }
            hypre_TFree(temp, HYPRE_MEMORY_HOST);
         }
      }

#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif

      /* Phase 5: remap offd global columns to their positions in
         col_map_offd_C via binary search. */
      for (i = ns; i < ne; i++)
      {
         for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i + 1]; j++)
         {
            B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C,
                                                    B_ext_offd_j[j],
                                                    num_cols_offd_C);
         }
      }
   } /* end parallel region */

   hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST);
   hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST);

   /* Wrap the assembled arrays in CSR matrix objects (host memory). */
   Bext_diag = hypre_CSRMatrixCreate(num_rows_Bext,
                                     last_col_diag_B - first_col_diag_B + 1,
                                     B_ext_diag_size);
   hypre_CSRMatrixMemoryLocation(Bext_diag) = HYPRE_MEMORY_HOST;
   Bext_offd = hypre_CSRMatrixCreate(num_rows_Bext, num_cols_offd_C, B_ext_offd_size);
   hypre_CSRMatrixMemoryLocation(Bext_offd) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixI(Bext_diag)    = B_ext_diag_i;
   hypre_CSRMatrixJ(Bext_diag)    = B_ext_diag_j;
   hypre_CSRMatrixData(Bext_diag) = B_ext_diag_data;
   hypre_CSRMatrixI(Bext_offd)    = B_ext_offd_i;
   hypre_CSRMatrixJ(Bext_offd)    = B_ext_offd_j;
   hypre_CSRMatrixData(Bext_offd) = B_ext_offd_data;

   *col_map_offd_C_ptr  = col_map_offd_C;
   *Bext_diag_ptr       = Bext_diag;
   *Bext_offd_ptr       = Bext_offd;
   *num_cols_offd_C_ptr = num_cols_offd_C;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixReorderHost
 *
 * Host kernel for hypre_CSRMatrixReorder: for each (nonzero) row, swaps the
 * diagonal entry into the first position of the row.  Requires a square
 * matrix; returns -1 otherwise.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_CSRMatrixReorderHost(hypre_CSRMatrix *A)
{
   HYPRE_Complex *A_data    = hypre_CSRMatrixData(A);
   HYPRE_Int     *A_i       = hypre_CSRMatrixI(A);
   HYPRE_Int     *A_j       = hypre_CSRMatrixJ(A);
   HYPRE_Int     *rownnz_A  = hypre_CSRMatrixRownnz(A);
   HYPRE_Int      nnzrows_A = hypre_CSRMatrixNumRownnz(A);
   HYPRE_Int      num_rows_A = hypre_CSRMatrixNumRows(A);
   HYPRE_Int      num_cols_A = hypre_CSRMatrixNumCols(A);
   HYPRE_Int      i, ii, j;

   /* the matrix should be square */
   if (num_rows_A != num_cols_A)
   {
      return -1;
   }

#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i, ii, j) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < nnzrows_A; i++)
   {
ii = rownnz_A ? rownnz_A[i] : i;
      /* find the diagonal in row ii and swap it to the row's first slot */
      for (j = A_i[ii]; j < A_i[ii + 1]; j++)
      {
         if (A_j[j] == ii)
         {
            if (j != A_i[ii])
            {
               hypre_swap(A_j, A_i[ii], j);
               hypre_swap_c(A_data, A_i[ii], j);
            }
            break;
         }
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixReorder:
 *
 * Reorders the column and data arrays of a square CSR matrix, such that the
 * first entry in each row is the diagonal one.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_CSRMatrixReorder(hypre_CSRMatrix *A)
{
   HYPRE_Int ierr = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      ierr = hypre_CSRMatrixMoveDiagFirstDevice(A);
   }
   else
#endif
   {
      ierr = hypre_CSRMatrixReorderHost(A);
   }

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixAddPartial:
 * adds matrix rows in the CSR matrix B to the CSR Matrix A, where row_nums[i]
 * defines to which row of A the i-th row of B is added, and returns a CSR
 * Matrix C;
 * Note: The routine does not check for 0-elements which might be generated
 *       through cancellation of elements in A and B or already contained
 *       in A and B. To remove those, use hypre_CSRMatrixDeleteZeros
 *--------------------------------------------------------------------------*/

hypre_CSRMatrix *
hypre_CSRMatrixAddPartial( hypre_CSRMatrix *A,
                           hypre_CSRMatrix *B,
                           HYPRE_Int       *row_nums)
{
   HYPRE_Complex   *A_data  = hypre_CSRMatrixData(A);
   HYPRE_Int       *A_i     = hypre_CSRMatrixI(A);
   HYPRE_Int       *A_j     = hypre_CSRMatrixJ(A);
   HYPRE_Int        nrows_A = hypre_CSRMatrixNumRows(A);
   HYPRE_Int        ncols_A = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex   *B_data  = hypre_CSRMatrixData(B);
   HYPRE_Int       *B_i     = hypre_CSRMatrixI(B);
   HYPRE_Int       *B_j     = hypre_CSRMatrixJ(B);
   HYPRE_Int        nrows_B = hypre_CSRMatrixNumRows(B);
   HYPRE_Int        ncols_B = hypre_CSRMatrixNumCols(B);
   hypre_CSRMatrix *C;
   HYPRE_Complex   *C_data;
   HYPRE_Int       *C_i;
   HYPRE_Int       *C_j;
   HYPRE_Int        ia, ib, ic, jcol, num_nonzeros;
   HYPRE_Int        pos, i, i2, j, cnt;
   HYPRE_Int       *marker;   /* per-column last-touch marker */
   HYPRE_Int       *map;      /* permutation of B's rows, sorted by target */
   HYPRE_Int       *temp;     /* sorted copy of row_nums */

   HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A);
   HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B);

   /* RL: TODO cannot guarantee, maybe should never assert
      hypre_assert(memory_location_A == memory_location_B); */

   /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
    * not sure if this is the right thing to do.
    * Also, need something like this in other places
    * TODO */
   HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);

   if (ncols_A != ncols_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Warning! incompatible matrix dimensions!\n");
      return NULL;
   }

   /* Sort B's rows by their destination row in A so each row of C can be
      assembled in a single sweep (temp holds sorted targets, map the
      matching original B-row indices). */
   map = hypre_CTAlloc(HYPRE_Int, nrows_B, HYPRE_MEMORY_HOST);
   temp = hypre_CTAlloc(HYPRE_Int, nrows_B, HYPRE_MEMORY_HOST);
   for (i = 0; i < nrows_B; i++)
   {
      map[i] = i;
      temp[i] = row_nums[i];
   }
   hypre_qsort2i(temp, map, 0, nrows_B - 1);

   marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST);
   C_i = hypre_CTAlloc(HYPRE_Int, nrows_A + 1, memory_location_C);

   for (ia = 0; ia < ncols_A; ia++)
   {
      marker[ia] = -1;
   }

   /* Pass 1: count nonzeros of each row of C (union of A's row pattern and
      the patterns of all B rows mapped onto it). */
   num_nonzeros = 0;
   C_i[0] = 0;
   cnt = 0;
   for (ic = 0; ic < nrows_A; ic++)
   {
      for (ia = A_i[ic]; ia < A_i[ic + 1]; ia++)
      {
         jcol = A_j[ia];
         marker[jcol] = ic;
         num_nonzeros++;
      }
      if (cnt < nrows_B && temp[cnt] == ic)
      {
         /* consume every B row whose target is ic (they are adjacent
            after the sort) */
         for (j = cnt; j < nrows_B; j++)
         {
            if (temp[j] == ic)
            {
               i2 = map[cnt++];
               for (ib = B_i[i2]; ib < B_i[i2 + 1]; ib++)
               {
                  jcol = B_j[ib];
                  if (marker[jcol] != ic)
                  {
                     marker[jcol] = ic;
                     num_nonzeros++;
                  }
               }
            }
            else
            {
               break;
            }
         }
      }
      C_i[ic + 1] = num_nonzeros;
   }

   C = hypre_CSRMatrixCreate(nrows_A, ncols_A, num_nonzeros);
   hypre_CSRMatrixI(C) = C_i;
   hypre_CSRMatrixInitialize_v2(C, 0, memory_location_C);
   C_j = hypre_CSRMatrixJ(C);
   C_data = hypre_CSRMatrixData(C);

   for (ia = 0; ia < ncols_A; ia++)
   {
      marker[ia] = -1;
   }

   /* Pass 2: fill C.  marker[jcol] now stores the position of column jcol
      in the current row of C; marker[jcol] < C_i[ic] means "not yet seen
      in row ic", so a new entry is appended, otherwise values are summed. */
   cnt = 0;
   pos = 0;
   for (ic = 0; ic < nrows_A; ic++)
   {
      for (ia = A_i[ic]; ia < A_i[ic + 1]; ia++)
      {
         jcol = A_j[ia];
         C_j[pos] = jcol;
         C_data[pos] = A_data[ia];
         marker[jcol] = pos;
         pos++;
      }
      if (cnt < nrows_B && temp[cnt] == ic)
      {
         for (j = cnt; j < nrows_B; j++)
         {
            if (temp[j] == ic)
            {
               i2 = map[cnt++];
               for (ib = B_i[i2]; ib < B_i[i2 + 1]; ib++)
               {
                  jcol = B_j[ib];
                  if (marker[jcol] < C_i[ic])
                  {
                     C_j[pos] = jcol;
                     C_data[pos] = B_data[ib];
                     marker[jcol] = pos;
                     pos++;
                  }
                  else
                  {
                     C_data[marker[jcol]] += B_data[ib];
                  }
               }
            }
            else
            {
               break;
            }
         }
      }
   }

   hypre_TFree(marker, HYPRE_MEMORY_HOST);
   hypre_TFree(map, HYPRE_MEMORY_HOST);
   hypre_TFree(temp, HYPRE_MEMORY_HOST);

   return C;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixSumElts:
 * Returns the sum of all matrix elements.
*--------------------------------------------------------------------------*/ HYPRE_Complex hypre_CSRMatrixSumElts( hypre_CSRMatrix *A ) { HYPRE_Complex sum = 0; HYPRE_Complex *data = hypre_CSRMatrixData(A); HYPRE_Int num_nonzeros = hypre_CSRMatrixNumNonzeros(A); HYPRE_Int i; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_nonzeros; i++) { sum += data[i]; } return sum; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixFnorm *--------------------------------------------------------------------------*/ HYPRE_Real hypre_CSRMatrixFnorm( hypre_CSRMatrix *A ) { HYPRE_Int nrows = hypre_CSRMatrixNumRows(A); HYPRE_Int num_nonzeros = hypre_CSRMatrixNumNonzeros(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int i; HYPRE_Complex sum = 0; hypre_assert(num_nonzeros == A_i[nrows]); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_nonzeros; ++i) { HYPRE_Complex v = A_data[i]; sum += v * v; } return sqrt(sum); } /*-------------------------------------------------------------------------- * hypre_CSRMatrixComputeRowSumHost * * type == 0, sum, * 1, abs sum * 2, square sum *--------------------------------------------------------------------------*/ void hypre_CSRMatrixComputeRowSumHost( hypre_CSRMatrix *A, HYPRE_Int *CF_i, HYPRE_Int *CF_j, HYPRE_Complex *row_sum, HYPRE_Int type, HYPRE_Complex scal, const char *set_or_add) { HYPRE_Int nrows = hypre_CSRMatrixNumRows(A); HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Int i, j; for (i = 0; i < nrows; i++) { HYPRE_Complex row_sum_i = set_or_add[0] == 's' ? 
0.0 : row_sum[i]; for (j = A_i[i]; j < A_i[i + 1]; j++) { if (CF_i && CF_j && CF_i[i] != CF_j[A_j[j]]) { continue; } if (type == 0) { row_sum_i += scal * A_data[j]; } else if (type == 1) { row_sum_i += scal * fabs(A_data[j]); } else if (type == 2) { row_sum_i += scal * A_data[j] * A_data[j]; } } row_sum[i] = row_sum_i; } } /*-------------------------------------------------------------------------- * hypre_CSRMatrixComputeRowSum *--------------------------------------------------------------------------*/ void hypre_CSRMatrixComputeRowSum( hypre_CSRMatrix *A, HYPRE_Int *CF_i, HYPRE_Int *CF_j, HYPRE_Complex *row_sum, HYPRE_Int type, HYPRE_Complex scal, const char *set_or_add) { hypre_assert( (CF_i && CF_j) || (!CF_i && !CF_j) ); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) ); if (exec == HYPRE_EXEC_DEVICE) { hypre_CSRMatrixComputeRowSumDevice(A, CF_i, CF_j, row_sum, type, scal, set_or_add); } else #endif { hypre_CSRMatrixComputeRowSumHost(A, CF_i, CF_j, row_sum, type, scal, set_or_add); } } /*-------------------------------------------------------------------------- * hypre_CSRMatrixExtractDiagonalHost * type 0: diag * 1: abs diag * 2: diag inverse * 3: diag inverse sqrt * 4: abs diag inverse sqrt *--------------------------------------------------------------------------*/ void hypre_CSRMatrixExtractDiagonalHost( hypre_CSRMatrix *A, HYPRE_Complex *d, HYPRE_Int type) { HYPRE_Int nrows = hypre_CSRMatrixNumRows(A); HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Int i, j; HYPRE_Complex d_i; for (i = 0; i < nrows; i++) { d_i = 0.0; for (j = A_i[i]; j < A_i[i + 1]; j++) { if (A_j[j] == i) { if (type == 0) { d_i = A_data[j]; } else if (type == 1) { d_i = fabs(A_data[j]); } else if (type == 2) { d_i = 1.0 / (A_data[j]); } else if (type == 3) { d_i = 1.0 / (sqrt(A_data[j])); } else if (type == 4) 
{ d_i = 1.0 / (sqrt(fabs(A_data[j]))); } break; } } d[i] = d_i; } } /*-------------------------------------------------------------------------- * hypre_CSRMatrixExtractDiagonal * * type 0: diag * 1: abs diag * 2: diag inverse * 3: diag inverse sqrt *--------------------------------------------------------------------------*/ void hypre_CSRMatrixExtractDiagonal( hypre_CSRMatrix *A, HYPRE_Complex *d, HYPRE_Int type) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) ); if (exec == HYPRE_EXEC_DEVICE) { hypre_CSRMatrixExtractDiagonalDevice(A, d, type); } else #endif { hypre_CSRMatrixExtractDiagonalHost(A, d, type); } } /* Scale CSR matrix A = scalar * A */ HYPRE_Int hypre_CSRMatrixScale( hypre_CSRMatrix *A, HYPRE_Complex scalar) { HYPRE_Complex *data = hypre_CSRMatrixData(A); HYPRE_Int i; HYPRE_Int k = hypre_CSRMatrixNumNonzeros(A); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) ); if (exec == HYPRE_EXEC_DEVICE) { hypreDevice_Scalen(data, k, scalar); } else #endif { for (i = 0; i < k; i++) { data[i] *= scalar; } } return hypre_error_flag; } HYPRE_Int hypre_CSRMatrixSetConstantValues( hypre_CSRMatrix *A, HYPRE_Complex value) { HYPRE_Int i; HYPRE_Int nnz = hypre_CSRMatrixNumNonzeros(A); if (!hypre_CSRMatrixData(A)) { hypre_CSRMatrixData(A) = hypre_TAlloc(HYPRE_Complex, nnz, hypre_CSRMatrixMemoryLocation(A)); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) ); if (exec == HYPRE_EXEC_DEVICE) { hypreDevice_Filln(hypre_CSRMatrixData(A), nnz, value); } else #endif { for (i = 0; i < nnz; i++) { hypre_CSRMatrixData(A)[i] = value; } } return hypre_error_flag; }
convolution_3x3_pack4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Transform the 3x3 convolution kernels into the 8x8 Winograd F(6,3) domain
// and interleave the result into a pack-4 layout for the SSE kernel below.
// NOTE(review): the interleave loops step by 4 — inch and outch are assumed
// to be multiples of 4 (pack4); confirm with the caller.
static void conv3x3s1_winograd64_transform_kernel_pack4_sse(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt)
{
    // winograd63 transform kernel
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // G matrix of the Winograd kernel transform (each row produces one of
    // the 8 transformed positions from the 3 kernel taps)
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : apply G to each of the three kernel rows -> 8x3 tmp
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v : apply G again along the other axis -> full 8x8 block
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = pb-pa-inch/pa-64-outch/pb
    kernel_tm_pack4.create(inch / 4, 64, outch / 4, (size_t)4u * 4 * 4, 4 * 4);

    for (int q = 0; q + (4 - 1) < outch; q += 4)
    {
        Mat g0 = kernel_tm_pack4.channel(q / 4);

        for (int k = 0; k < 64; k++)
        {
            float* g00 = g0.row<float>(k);

            for (int p = 0; p + (4 - 1) < inch; p += 4)
            {
                // gather one 4x4 (inch x outch) tile of transformed
                // coefficient k into consecutive destination floats
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);

                        g00[0] = (float)k00[k];

                        g00++;
                    }
                }
            }
        }
    }
}

// Winograd F(6,3) forward convolution, pack-4 SSE path: pads the input to a
// multiple of the 6x6 output tile, transforms input tiles to the 8x8
// Winograd domain, multiplies against the transformed kernel, then inverse
// transforms (the remainder of this function continues past this chunk).
static void conv3x3s1_winograd64_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 6n+2 : each 8x8 input tile overlaps its neighbor by 2 pixels
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 5) / 6 * 6;
    outh = (outh + 5) / 6 * 6;
    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    const float* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;

        const int tiles = w_tm / 8 * h_tm / 8;

        //         bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        bottom_blob_tm.create(tiles, 64, inch, 4u * elempack, elempack, opt.workspace_allocator);

        // B^T matrix of the input transform (kept for reference; the code
        // below is the factored form of these coefficients):
        //         const float itm[8][8] = {
        //             {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
        //
        //             {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
        //             {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
        //
        //             {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
        //             {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
        //
        //             {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
        //             {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
        //
        //             {0.0f, -1.0f,  0.00f,  5.25f,
0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); #ifdef _MSC_VER __declspec(align(16)) #else __attribute__((aligned(16))) #endif float tmp[8][8][4]; __m128 _v5_25 = _mm_set1_ps(5.25f); __m128 _vm4_25 = _mm_set1_ps(-4.25f); __m128 _vm1_25 = _mm_set1_ps(-1.25f); __m128 _v0_25 = _mm_set1_ps(0.25f); __m128 _vm2_5 = _mm_set1_ps(-2.5f); __m128 _v0_5 = _mm_set1_ps(0.5f); __m128 _v2 = _mm_set1_ps(2.f); __m128 _v4 = _mm_set1_ps(4.f); // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const float* r0 = img0.row(i * 6) + (j * 6) * 4; for (int m = 0; m < 8; m++) { __m128 _r00 = _mm_load_ps(r0); __m128 _r01 = _mm_load_ps(r0 + 4); __m128 _r02 = _mm_load_ps(r0 + 4 * 2); __m128 _r03 = _mm_load_ps(r0 + 4 * 3); __m128 _r04 = _mm_load_ps(r0 + 4 * 4); __m128 _r05 = _mm_load_ps(r0 + 4 * 5); __m128 _r06 = _mm_load_ps(r0 + 4 * 6); __m128 _r07 = _mm_load_ps(r0 + 4 * 7); __m128 _tmp0m = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_r04, _r02), _mm_sub_ps(_r00, _r06)); __m128 _tmp7m = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_r03, _r05), _mm_sub_ps(_r07, _r01)); _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[7][m], _tmp7m); __m128 _tmp12a = _mm_comp_fmadd_ps(_vm4_25, _r04, _mm_add_ps(_r02, _r06)); __m128 _tmp12b = _mm_comp_fmadd_ps(_vm4_25, _r03, _mm_add_ps(_r01, _r05)); __m128 _tmp1m = 
_mm_add_ps(_tmp12a, _tmp12b); __m128 _tmp2m = _mm_sub_ps(_tmp12a, _tmp12b); _mm_store_ps(tmp[1][m], _tmp1m); _mm_store_ps(tmp[2][m], _tmp2m); __m128 _tmp34a = _mm_comp_fmadd_ps(_vm1_25, _r04, _mm_comp_fmadd_ps(_v0_25, _r02, _r06)); __m128 _tmp34b = _mm_comp_fmadd_ps(_v2, _r05, _mm_comp_fmadd_ps(_vm2_5, _r03, _mm_mul_ps(_r01, _v0_5))); __m128 _tmp3m = _mm_add_ps(_tmp34a, _tmp34b); __m128 _tmp4m = _mm_sub_ps(_tmp34a, _tmp34b); _mm_store_ps(tmp[3][m], _tmp3m); _mm_store_ps(tmp[4][m], _tmp4m); __m128 _tmp56a = _mm_comp_fmadd_ps(_v4, _mm_comp_fmadd_ps(_vm1_25, _r04, _r02), _r06); __m128 _tmp56b = _mm_comp_fmadd_ps(_v0_5, _r05, _mm_comp_fmadd_ps(_vm2_5, _r03, _mm_mul_ps(_r01, _v2))); __m128 _tmp5m = _mm_add_ps(_tmp56a, _tmp56b); __m128 _tmp6m = _mm_sub_ps(_tmp56a, _tmp56b); _mm_store_ps(tmp[5][m], _tmp5m); _mm_store_ps(tmp[6][m], _tmp6m); r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 8 + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2; float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3; float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4; float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5; float* r0_tm_6 = r0_tm_0 + tiles * 4 * 6; float* r0_tm_7 = r0_tm_0 + tiles * 4 * 7; for (int m = 0; m < 8; m++) { __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]); __m128 _tmp04 = _mm_load_ps(tmp[m][4]); __m128 _tmp05 = _mm_load_ps(tmp[m][5]); __m128 _tmp06 = _mm_load_ps(tmp[m][6]); __m128 _tmp07 = _mm_load_ps(tmp[m][7]); __m128 _r0tm0 = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_tmp04, _tmp02), _mm_sub_ps(_tmp00, _tmp06)); __m128 _r0tm7 = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_tmp03, _tmp05), _mm_sub_ps(_tmp07, _tmp01)); __m128 _tmp12a = _mm_comp_fmadd_ps(_vm4_25, _tmp04, _mm_add_ps(_tmp02, _tmp06)); __m128 _tmp12b = _mm_comp_fmadd_ps(_vm4_25, _tmp03, _mm_add_ps(_tmp01, _tmp05)); __m128 _r0tm1 = _mm_add_ps(_tmp12a, _tmp12b); __m128 _r0tm2 = 
_mm_sub_ps(_tmp12a, _tmp12b); __m128 _tmp34a = _mm_comp_fmadd_ps(_vm1_25, _tmp04, _mm_comp_fmadd_ps(_v0_25, _tmp02, _tmp06)); __m128 _tmp34b = _mm_comp_fmadd_ps(_v2, _tmp05, _mm_comp_fmadd_ps(_vm2_5, _tmp03, _mm_mul_ps(_tmp01, _v0_5))); __m128 _r0tm3 = _mm_add_ps(_tmp34a, _tmp34b); __m128 _r0tm4 = _mm_sub_ps(_tmp34a, _tmp34b); __m128 _tmp56a = _mm_comp_fmadd_ps(_v4, _mm_comp_fmadd_ps(_vm1_25, _tmp04, _tmp02), _tmp06); __m128 _tmp56b = _mm_comp_fmadd_ps(_v0_5, _tmp05, _mm_comp_fmadd_ps(_vm2_5, _tmp03, _mm_mul_ps(_tmp01, _v2))); __m128 _r0tm5 = _mm_add_ps(_tmp56a, _tmp56b); __m128 _r0tm6 = _mm_sub_ps(_tmp56a, _tmp56b); _mm_store_ps(r0_tm_0, _r0tm0); _mm_store_ps(r0_tm_1, _r0tm1); _mm_store_ps(r0_tm_2, _r0tm2); _mm_store_ps(r0_tm_3, _r0tm3); _mm_store_ps(r0_tm_4, _r0tm4); _mm_store_ps(r0_tm_5, _r0tm5); _mm_store_ps(r0_tm_6, _r0tm6); _mm_store_ps(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 4 * 8; r0_tm_1 += tiles * 4 * 8; r0_tm_2 += tiles * 4 * 8; r0_tm_3 += tiles * 4 * 8; r0_tm_4 += tiles * 4 * 8; r0_tm_5 += tiles * 4 * 8; r0_tm_6 += tiles * 4 * 8; r0_tm_7 += tiles * 4 * 8; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 4u * elempack, 
elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 11 < tiles; i += 12) { float* tmpptr = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x12 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r2 = _mm_load_ps(r0 + 4 * 2); __m128 _r3 = _mm_load_ps(r0 + 4 * 3); __m128 _r4 = _mm_load_ps(r0 + 4 * 4); __m128 _r5 = _mm_load_ps(r0 + 4 * 5); __m128 _r6 = _mm_load_ps(r0 + 4 * 6); __m128 _r7 = _mm_load_ps(r0 + 4 * 7); __m128 _r8 = _mm_load_ps(r0 + 4 * 8); __m128 _r9 = _mm_load_ps(r0 + 4 * 9); __m128 _ra = _mm_load_ps(r0 + 4 * 10); __m128 _rb = _mm_load_ps(r0 + 4 * 11); _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7); _MM_TRANSPOSE4_PS(_r8, _r9, _ra, _rb); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r4); _mm_store_ps(tmpptr + 4 * 2, _r8); _mm_store_ps(tmpptr + 4 * 3, _r1); _mm_store_ps(tmpptr + 4 * 4, _r5); _mm_store_ps(tmpptr + 4 * 5, _r9); _mm_store_ps(tmpptr + 4 * 6, _r2); _mm_store_ps(tmpptr + 4 * 7, _r6); _mm_store_ps(tmpptr + 4 * 8, _ra); _mm_store_ps(tmpptr + 4 * 9, _r3); _mm_store_ps(tmpptr + 4 * 10, _r7); _mm_store_ps(tmpptr + 4 * 11, _rb); r0 += bottom_blob_tm.cstep * 4; tmpptr += 48; } } for (; i + 7 < tiles; i += 8) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x8 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r2 = _mm_load_ps(r0 + 4 * 2); __m128 _r3 = _mm_load_ps(r0 + 4 * 3); __m128 _r4 = _mm_load_ps(r0 + 4 * 4); __m128 _r5 = _mm_load_ps(r0 + 4 * 5); __m128 _r6 = _mm_load_ps(r0 + 4 * 6); __m128 _r7 = _mm_load_ps(r0 + 4 * 7); 
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r4); _mm_store_ps(tmpptr + 4 * 2, _r1); _mm_store_ps(tmpptr + 4 * 3, _r5); _mm_store_ps(tmpptr + 4 * 4, _r2); _mm_store_ps(tmpptr + 4 * 5, _r6); _mm_store_ps(tmpptr + 4 * 6, _r3); _mm_store_ps(tmpptr + 4 * 7, _r7); r0 += bottom_blob_tm.cstep * 4; tmpptr += 32; } } for (; i + 3 < tiles; i += 4) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x4 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r2 = _mm_load_ps(r0 + 4 * 2); __m128 _r3 = _mm_load_ps(r0 + 4 * 3); _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r1); _mm_store_ps(tmpptr + 4 * 2, _r2); _mm_store_ps(tmpptr + 4 * 3, _r3); r0 += bottom_blob_tm.cstep * 4; tmpptr += 16; } } for (; i + 1 < tiles; i += 2) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x2 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r01_0 = _mm_unpacklo_ps(_r0, _r1); __m128 _r01_1 = _mm_unpackhi_ps(_r0, _r1); _mm_store_ps(tmpptr, _r01_0); _mm_store_ps(tmpptr + 4, _r01_1); r0 += bottom_blob_tm.cstep * 4; tmpptr += 8; } } for (; i < tiles; i++) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { __m128 _val = _mm_load_ps(r0); _mm_store_ps(tmpptr, _val); r0 += bottom_blob_tm.cstep * 4; tmpptr += 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { 
float* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); __m128 _sum2 = _mm_setzero_ps(); __m128 _sum3 = _mm_setzero_ps(); __m128 _sum4 = _mm_setzero_ps(); __m128 _sum5 = _mm_setzero_ps(); __m128 _sum6 = _mm_setzero_ps(); __m128 _sum7 = _mm_setzero_ps(); __m128 _sum8 = _mm_setzero_ps(); __m128 _sum9 = _mm_setzero_ps(); __m128 _suma = _mm_setzero_ps(); __m128 _sumb = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); __m128 _val2 = _mm_load1_ps(r0 + 2); __m128 _val3 = _mm_load1_ps(r0 + 3); __m128 _val4 = _mm_load1_ps(r0 + 4); __m128 _val5 = _mm_load1_ps(r0 + 5); __m128 _val6 = _mm_load1_ps(r0 + 6); __m128 _val7 = _mm_load1_ps(r0 + 7); __m128 _val8 = _mm_load1_ps(r0 + 8); __m128 _val9 = _mm_load1_ps(r0 + 9); __m128 _vala = _mm_load1_ps(r0 + 10); __m128 _valb = _mm_load1_ps(r0 + 11); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3); _sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5); _sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7); _sum8 = _mm_comp_fmadd_ps(_val8, _w0, _sum8); _sum9 = _mm_comp_fmadd_ps(_val9, _w0, _sum9); _suma = _mm_comp_fmadd_ps(_vala, _w0, _suma); _sumb = _mm_comp_fmadd_ps(_valb, _w0, _sumb); r0 += 12; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); _mm_store_ps(output0_tm + 4 * 2, _sum2); _mm_store_ps(output0_tm + 4 * 3, _sum3); _mm_store_ps(output0_tm + 4 * 4, _sum4); 
_mm_store_ps(output0_tm + 4 * 5, _sum5); _mm_store_ps(output0_tm + 4 * 6, _sum6); _mm_store_ps(output0_tm + 4 * 7, _sum7); _mm_store_ps(output0_tm + 4 * 8, _sum8); _mm_store_ps(output0_tm + 4 * 9, _sum9); _mm_store_ps(output0_tm + 4 * 10, _suma); _mm_store_ps(output0_tm + 4 * 11, _sumb); output0_tm += 4 * 12; } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); __m128 _sum2 = _mm_setzero_ps(); __m128 _sum3 = _mm_setzero_ps(); __m128 _sum4 = _mm_setzero_ps(); __m128 _sum5 = _mm_setzero_ps(); __m128 _sum6 = _mm_setzero_ps(); __m128 _sum7 = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); __m128 _val2 = _mm_load1_ps(r0 + 2); __m128 _val3 = _mm_load1_ps(r0 + 3); __m128 _val4 = _mm_load1_ps(r0 + 4); __m128 _val5 = _mm_load1_ps(r0 + 5); __m128 _val6 = _mm_load1_ps(r0 + 6); __m128 _val7 = _mm_load1_ps(r0 + 7); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3); _sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5); _sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7); r0 += 8; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); _mm_store_ps(output0_tm + 4 * 2, _sum2); _mm_store_ps(output0_tm + 4 * 3, _sum3); _mm_store_ps(output0_tm + 4 * 4, _sum4); _mm_store_ps(output0_tm + 4 * 5, _sum5); _mm_store_ps(output0_tm + 4 * 6, _sum6); _mm_store_ps(output0_tm + 4 * 7, _sum7); output0_tm += 4 * 8; } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 
_sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); __m128 _sum2 = _mm_setzero_ps(); __m128 _sum3 = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); __m128 _val2 = _mm_load1_ps(r0 + 2); __m128 _val3 = _mm_load1_ps(r0 + 3); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3); r0 += 4; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); _mm_store_ps(output0_tm + 4 * 2, _sum2); _mm_store_ps(output0_tm + 4 * 3, _sum3); output0_tm += 4 * 4; } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); r0 += 2; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); output0_tm += 4 * 2; } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); _sum = _mm_comp_fmadd_ps(_val0, _w0, _sum); r0 += 1; k0 += 4; } _mm_store_ps(output0_tm, _sum); output0_tm += 4; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, 
opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; __m128 _bias0 = bias ? 
_mm_loadu_ps((const float*)bias + p * 4) : _mm_setzero_ps(); #ifdef _MSC_VER __declspec(align(16)) #else __attribute__((aligned(16))) #endif float tmp[6][8][4]; __m128 _v32 = _mm_set1_ps(32.f); __m128 _v16 = _mm_set1_ps(16.f); __m128 _v8 = _mm_set1_ps(8.f); __m128 _v4 = _mm_set1_ps(4.f); __m128 _v2 = _mm_set1_ps(2.f); // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 8 + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2; const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3; const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4; const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5; const float* output0_tm_6 = output0_tm_0 + tiles * 4 * 6; const float* output0_tm_7 = output0_tm_0 + tiles * 4 * 7; float* output0 = out0.row<float>(i * 6) + (j * 6) * 4; // TODO msa optimize for (int m = 0; m < 8; m++) { __m128 _out0tm0 = _mm_load_ps(output0_tm_0); __m128 _out0tm1 = _mm_load_ps(output0_tm_1); __m128 _out0tm2 = _mm_load_ps(output0_tm_2); __m128 _out0tm3 = _mm_load_ps(output0_tm_3); __m128 _out0tm4 = _mm_load_ps(output0_tm_4); __m128 _out0tm5 = _mm_load_ps(output0_tm_5); __m128 _out0tm6 = _mm_load_ps(output0_tm_6); __m128 _out0tm7 = _mm_load_ps(output0_tm_7); __m128 _tmp024a = _mm_add_ps(_out0tm1, _out0tm2); __m128 _tmp135a = _mm_sub_ps(_out0tm1, _out0tm2); __m128 _tmp024b = _mm_add_ps(_out0tm3, _out0tm4); __m128 _tmp135b = _mm_sub_ps(_out0tm3, _out0tm4); __m128 _tmp024c = _mm_add_ps(_out0tm5, _out0tm6); __m128 _tmp135c = _mm_sub_ps(_out0tm5, _out0tm6); __m128 _tmp0m = _mm_add_ps(_mm_add_ps(_out0tm0, _tmp024a), _mm_comp_fmadd_ps(_v32, _tmp024c, _tmp024b)); __m128 _tmp2m = _mm_comp_fmadd_ps(_v8, _tmp024c, _mm_comp_fmadd_ps(_v4, _tmp024b, _tmp024a)); __m128 _tmp4m = _mm_comp_fmadd_ps(_v2, _tmp024c, _mm_comp_fmadd_ps(_v16, _tmp024b, 
_tmp024a)); _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[2][m], _tmp2m); _mm_store_ps(tmp[4][m], _tmp4m); __m128 _tmp1m = _mm_comp_fmadd_ps(_v16, _tmp135c, _mm_comp_fmadd_ps(_v2, _tmp135b, _tmp135a)); __m128 _tmp3m = _mm_comp_fmadd_ps(_v4, _tmp135c, _mm_comp_fmadd_ps(_v8, _tmp135b, _tmp135a)); __m128 _tmp5m = _mm_add_ps(_mm_add_ps(_out0tm7, _tmp135a), _mm_comp_fmadd_ps(_v32, _tmp135b, _tmp135c)); _mm_store_ps(tmp[1][m], _tmp1m); _mm_store_ps(tmp[3][m], _tmp3m); _mm_store_ps(tmp[5][m], _tmp5m); output0_tm_0 += tiles * 4 * 8; output0_tm_1 += tiles * 4 * 8; output0_tm_2 += tiles * 4 * 8; output0_tm_3 += tiles * 4 * 8; output0_tm_4 += tiles * 4 * 8; output0_tm_5 += tiles * 4 * 8; output0_tm_6 += tiles * 4 * 8; output0_tm_7 += tiles * 4 * 8; } for (int m = 0; m < 6; m++) { __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]); __m128 _tmp04 = _mm_load_ps(tmp[m][4]); __m128 _tmp05 = _mm_load_ps(tmp[m][5]); __m128 _tmp06 = _mm_load_ps(tmp[m][6]); __m128 _tmp07 = _mm_load_ps(tmp[m][7]); __m128 _tmp024a = _mm_add_ps(_tmp01, _tmp02); __m128 _tmp135a = _mm_sub_ps(_tmp01, _tmp02); __m128 _tmp024b = _mm_add_ps(_tmp03, _tmp04); __m128 _tmp135b = _mm_sub_ps(_tmp03, _tmp04); __m128 _tmp024c = _mm_add_ps(_tmp05, _tmp06); __m128 _tmp135c = _mm_sub_ps(_tmp05, _tmp06); __m128 _out00 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp00, _tmp024a), _mm_comp_fmadd_ps(_v32, _tmp024c, _tmp024b))); __m128 _out02 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v8, _tmp024c, _mm_comp_fmadd_ps(_v4, _tmp024b, _tmp024a))); __m128 _out04 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v2, _tmp024c, _mm_comp_fmadd_ps(_v16, _tmp024b, _tmp024a))); _mm_store_ps(output0, _out00); _mm_store_ps(output0 + 4 * 2, _out02); _mm_store_ps(output0 + 4 * 4, _out04); __m128 _out01 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v16, _tmp135c, _mm_comp_fmadd_ps(_v2, _tmp135b, _tmp135a))); __m128 _out03 = _mm_add_ps(_bias0, 
_mm_comp_fmadd_ps(_v4, _tmp135c, _mm_comp_fmadd_ps(_v8, _tmp135b, _tmp135a)));
__m128 _out05 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_out0tm7_placeholder_never, _tmp135a), _mm_comp_fmadd_ps(_v32, _tmp135b, _tmp135c)));
_mm_set1_ps(-2.f); __m128 _v2 = _mm_set1_ps(2.f); // tile for (int i = 0; i < h_tm / 6; i++) { for (int j = 0; j < w_tm / 6; j++) { const float* r0 = img0.row(i * 4) + (j * 4) * 4; for (int m = 0; m < 6; m++) { __m128 _r00 = _mm_load_ps(r0); __m128 _r01 = _mm_load_ps(r0 + 4); __m128 _r02 = _mm_load_ps(r0 + 4 * 2); __m128 _r03 = _mm_load_ps(r0 + 4 * 3); __m128 _r04 = _mm_load_ps(r0 + 4 * 4); __m128 _r05 = _mm_load_ps(r0 + 4 * 5); __m128 _tmp0m = _mm_comp_fmadd_ps(_vm5, _r02, _mm_comp_fmadd_ps(_v4, _r00, _r04)); __m128 _tmp1m = _mm_comp_fmadd_ps(_vm4, _mm_add_ps(_r01, _r02), _mm_add_ps(_r04, _r03)); __m128 _tmp2m = _mm_comp_fmadd_ps(_v4, _mm_sub_ps(_r01, _r02), _mm_sub_ps(_r04, _r03)); __m128 _tmp3m = _mm_comp_fmadd_ps(_vm2, _mm_sub_ps(_r01, _r03), _mm_sub_ps(_r04, _r02)); __m128 _tmp4m = _mm_comp_fmadd_ps(_v2, _mm_sub_ps(_r01, _r03), _mm_sub_ps(_r04, _r02)); __m128 _tmp5m = _mm_comp_fmadd_ps(_vm5, _r03, _mm_comp_fmadd_ps(_v4, _r01, _r05)); _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[1][m], _tmp1m); _mm_store_ps(tmp[2][m], _tmp2m); _mm_store_ps(tmp[3][m], _tmp3m); _mm_store_ps(tmp[4][m], _tmp4m); _mm_store_ps(tmp[5][m], _tmp5m); r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 6 + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2; float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3; float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4; float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5; for (int m = 0; m < 6; m++) { __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]); __m128 _tmp04 = _mm_load_ps(tmp[m][4]); __m128 _tmp05 = _mm_load_ps(tmp[m][5]); __m128 _r0tm0 = _mm_comp_fmadd_ps(_vm5, _tmp02, _mm_comp_fmadd_ps(_v4, _tmp00, _tmp04)); __m128 _r0tm1 = _mm_comp_fmadd_ps(_vm4, _mm_add_ps(_tmp01, _tmp02), _mm_add_ps(_tmp04, _tmp03)); __m128 _r0tm2 = _mm_comp_fmadd_ps(_v4, _mm_sub_ps(_tmp01, _tmp02), _mm_sub_ps(_tmp04, _tmp03)); __m128 _r0tm3 = 
_mm_comp_fmadd_ps(_vm2, _mm_sub_ps(_tmp01, _tmp03), _mm_sub_ps(_tmp04, _tmp02)); __m128 _r0tm4 = _mm_comp_fmadd_ps(_v2, _mm_sub_ps(_tmp01, _tmp03), _mm_sub_ps(_tmp04, _tmp02)); __m128 _r0tm5 = _mm_comp_fmadd_ps(_vm5, _tmp03, _mm_comp_fmadd_ps(_v4, _tmp01, _tmp05)); _mm_store_ps(r0_tm_0, _r0tm0); _mm_store_ps(r0_tm_1, _r0tm1); _mm_store_ps(r0_tm_2, _r0tm2); _mm_store_ps(r0_tm_3, _r0tm3); _mm_store_ps(r0_tm_4, _r0tm4); _mm_store_ps(r0_tm_5, _r0tm5); r0_tm_0 += tiles * 4 * 6; r0_tm_1 += tiles * 4 * 6; r0_tm_2 += tiles * 4 * 6; r0_tm_3 += tiles * 4 * 6; r0_tm_4 += tiles * 4 * 6; r0_tm_5 += tiles * 4 * 6; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = h_tm / 6 * w_tm / 6; // permute // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 36; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 11 < tiles; i += 12) { float* tmpptr = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 
4x12 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r2 = _mm_load_ps(r0 + 4 * 2); __m128 _r3 = _mm_load_ps(r0 + 4 * 3); __m128 _r4 = _mm_load_ps(r0 + 4 * 4); __m128 _r5 = _mm_load_ps(r0 + 4 * 5); __m128 _r6 = _mm_load_ps(r0 + 4 * 6); __m128 _r7 = _mm_load_ps(r0 + 4 * 7); __m128 _r8 = _mm_load_ps(r0 + 4 * 8); __m128 _r9 = _mm_load_ps(r0 + 4 * 9); __m128 _ra = _mm_load_ps(r0 + 4 * 10); __m128 _rb = _mm_load_ps(r0 + 4 * 11); _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7); _MM_TRANSPOSE4_PS(_r8, _r9, _ra, _rb); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r4); _mm_store_ps(tmpptr + 4 * 2, _r8); _mm_store_ps(tmpptr + 4 * 3, _r1); _mm_store_ps(tmpptr + 4 * 4, _r5); _mm_store_ps(tmpptr + 4 * 5, _r9); _mm_store_ps(tmpptr + 4 * 6, _r2); _mm_store_ps(tmpptr + 4 * 7, _r6); _mm_store_ps(tmpptr + 4 * 8, _ra); _mm_store_ps(tmpptr + 4 * 9, _r3); _mm_store_ps(tmpptr + 4 * 10, _r7); _mm_store_ps(tmpptr + 4 * 11, _rb); r0 += bottom_blob_tm.cstep * 4; tmpptr += 48; } } for (; i + 7 < tiles; i += 8) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x8 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r2 = _mm_load_ps(r0 + 4 * 2); __m128 _r3 = _mm_load_ps(r0 + 4 * 3); __m128 _r4 = _mm_load_ps(r0 + 4 * 4); __m128 _r5 = _mm_load_ps(r0 + 4 * 5); __m128 _r6 = _mm_load_ps(r0 + 4 * 6); __m128 _r7 = _mm_load_ps(r0 + 4 * 7); _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r4); _mm_store_ps(tmpptr + 4 * 2, _r1); _mm_store_ps(tmpptr + 4 * 3, _r5); _mm_store_ps(tmpptr + 4 * 4, _r2); _mm_store_ps(tmpptr + 4 * 5, _r6); _mm_store_ps(tmpptr + 4 * 6, _r3); _mm_store_ps(tmpptr + 4 * 7, _r7); r0 += bottom_blob_tm.cstep * 4; tmpptr += 32; } } for (; i + 3 < tiles; i += 4) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) 
/ 4); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x4 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r2 = _mm_load_ps(r0 + 4 * 2); __m128 _r3 = _mm_load_ps(r0 + 4 * 3); _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r1); _mm_store_ps(tmpptr + 4 * 2, _r2); _mm_store_ps(tmpptr + 4 * 3, _r3); r0 += bottom_blob_tm.cstep * 4; tmpptr += 16; } } for (; i + 1 < tiles; i += 2) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x2 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r01_0 = _mm_unpacklo_ps(_r0, _r1); __m128 _r01_1 = _mm_unpackhi_ps(_r0, _r1); _mm_store_ps(tmpptr, _r01_0); _mm_store_ps(tmpptr + 4, _r01_1); r0 += bottom_blob_tm.cstep * 4; tmpptr += 8; } } for (; i < tiles; i++) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { __m128 _val = _mm_load_ps(r0); _mm_store_ps(tmpptr, _val); r0 += bottom_blob_tm.cstep * 4; tmpptr += 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 36, outch, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); __m128 _sum2 = _mm_setzero_ps(); __m128 _sum3 = _mm_setzero_ps(); __m128 _sum4 = _mm_setzero_ps(); __m128 
_sum5 = _mm_setzero_ps(); __m128 _sum6 = _mm_setzero_ps(); __m128 _sum7 = _mm_setzero_ps(); __m128 _sum8 = _mm_setzero_ps(); __m128 _sum9 = _mm_setzero_ps(); __m128 _suma = _mm_setzero_ps(); __m128 _sumb = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); __m128 _val2 = _mm_load1_ps(r0 + 2); __m128 _val3 = _mm_load1_ps(r0 + 3); __m128 _val4 = _mm_load1_ps(r0 + 4); __m128 _val5 = _mm_load1_ps(r0 + 5); __m128 _val6 = _mm_load1_ps(r0 + 6); __m128 _val7 = _mm_load1_ps(r0 + 7); __m128 _val8 = _mm_load1_ps(r0 + 8); __m128 _val9 = _mm_load1_ps(r0 + 9); __m128 _vala = _mm_load1_ps(r0 + 10); __m128 _valb = _mm_load1_ps(r0 + 11); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3); _sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5); _sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7); _sum8 = _mm_comp_fmadd_ps(_val8, _w0, _sum8); _sum9 = _mm_comp_fmadd_ps(_val9, _w0, _sum9); _suma = _mm_comp_fmadd_ps(_vala, _w0, _suma); _sumb = _mm_comp_fmadd_ps(_valb, _w0, _sumb); r0 += 12; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); _mm_store_ps(output0_tm + 4 * 2, _sum2); _mm_store_ps(output0_tm + 4 * 3, _sum3); _mm_store_ps(output0_tm + 4 * 4, _sum4); _mm_store_ps(output0_tm + 4 * 5, _sum5); _mm_store_ps(output0_tm + 4 * 6, _sum6); _mm_store_ps(output0_tm + 4 * 7, _sum7); _mm_store_ps(output0_tm + 4 * 8, _sum8); _mm_store_ps(output0_tm + 4 * 9, _sum9); _mm_store_ps(output0_tm + 4 * 10, _suma); _mm_store_ps(output0_tm + 4 * 11, _sumb); output0_tm += 4 * 12; } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); 
__m128 _sum1 = _mm_setzero_ps(); __m128 _sum2 = _mm_setzero_ps(); __m128 _sum3 = _mm_setzero_ps(); __m128 _sum4 = _mm_setzero_ps(); __m128 _sum5 = _mm_setzero_ps(); __m128 _sum6 = _mm_setzero_ps(); __m128 _sum7 = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); __m128 _val2 = _mm_load1_ps(r0 + 2); __m128 _val3 = _mm_load1_ps(r0 + 3); __m128 _val4 = _mm_load1_ps(r0 + 4); __m128 _val5 = _mm_load1_ps(r0 + 5); __m128 _val6 = _mm_load1_ps(r0 + 6); __m128 _val7 = _mm_load1_ps(r0 + 7); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3); _sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5); _sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7); r0 += 8; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); _mm_store_ps(output0_tm + 4 * 2, _sum2); _mm_store_ps(output0_tm + 4 * 3, _sum3); _mm_store_ps(output0_tm + 4 * 4, _sum4); _mm_store_ps(output0_tm + 4 * 5, _sum5); _mm_store_ps(output0_tm + 4 * 6, _sum6); _mm_store_ps(output0_tm + 4 * 7, _sum7); output0_tm += 4 * 8; } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); __m128 _sum2 = _mm_setzero_ps(); __m128 _sum3 = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); __m128 _val2 = _mm_load1_ps(r0 + 2); __m128 _val3 = _mm_load1_ps(r0 + 3); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm_comp_fmadd_ps(_val3, _w0, 
_sum3); r0 += 4; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); _mm_store_ps(output0_tm + 4 * 2, _sum2); _mm_store_ps(output0_tm + 4 * 3, _sum3); output0_tm += 4 * 4; } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); r0 += 2; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); output0_tm += 4 * 2; } for (; i < tiles; i++) { const float* r0 = bb2.row<const float>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k0 = kernel0_tm.row<const float>(r); int nn = inch * 4; // inch always > 0 __m128 _sum = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); _sum = _mm_comp_fmadd_ps(_val0, _w0, _sum); r0 += 1; k0 += 4; } _mm_store_ps(output0_tm, _sum); output0_tm += 4; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; #pragma omp 
parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; __m128 _bias0 = bias ? _mm_loadu_ps((const float*)bias + p * 4) : _mm_setzero_ps(); #ifdef _MSC_VER __declspec(align(16)) #else __attribute__((aligned(16))) #endif float tmp[4][6][4]; __m128 _v2 = _mm_set1_ps(2.f); __m128 _v4 = _mm_set1_ps(4.f); __m128 _v8 = _mm_set1_ps(8.f); // tile for (int i = 0; i < outh / 4; i++) { for (int j = 0; j < outw / 4; j++) { // top_blob_tm.create(tiles, 36, outch, elemsize, elempack); const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 6 + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2; const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3; const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4; const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5; float* output0 = out0.row<float>(i * 4) + (j * 4) * 4; // TODO msa optimize for (int m = 0; m < 6; m++) { __m128 _out0tm0 = _mm_load_ps(output0_tm_0); __m128 _out0tm1 = _mm_load_ps(output0_tm_1); __m128 _out0tm2 = _mm_load_ps(output0_tm_2); __m128 _out0tm3 = _mm_load_ps(output0_tm_3); __m128 _out0tm4 = _mm_load_ps(output0_tm_4); __m128 _out0tm5 = _mm_load_ps(output0_tm_5); __m128 _tmp02a = _mm_add_ps(_out0tm1, _out0tm2); __m128 _tmp13a = _mm_sub_ps(_out0tm1, _out0tm2); __m128 _tmp02b = _mm_add_ps(_out0tm3, _out0tm4); __m128 _tmp13b = _mm_sub_ps(_out0tm3, _out0tm4); __m128 _tmp0m = _mm_add_ps(_mm_add_ps(_out0tm0, _tmp02a), _tmp02b); __m128 _tmp1m = _mm_comp_fmadd_ps(_v2, _tmp13b, _tmp13a); __m128 _tmp2m = _mm_comp_fmadd_ps(_v4, _tmp02b, _tmp02a); __m128 _tmp3m = _mm_comp_fmadd_ps(_v8, _tmp13b, _mm_add_ps(_out0tm5, _tmp13a)); _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[1][m], _tmp1m); _mm_store_ps(tmp[2][m], _tmp2m); _mm_store_ps(tmp[3][m], _tmp3m); output0_tm_0 += tiles * 4 * 6; output0_tm_1 
+= tiles * 4 * 6; output0_tm_2 += tiles * 4 * 6; output0_tm_3 += tiles * 4 * 6; output0_tm_4 += tiles * 4 * 6; output0_tm_5 += tiles * 4 * 6; } for (int m = 0; m < 4; m++) { __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]); __m128 _tmp04 = _mm_load_ps(tmp[m][4]); __m128 _tmp05 = _mm_load_ps(tmp[m][5]); __m128 _tmp02a = _mm_add_ps(_tmp01, _tmp02); __m128 _tmp13a = _mm_sub_ps(_tmp01, _tmp02); __m128 _tmp02b = _mm_add_ps(_tmp03, _tmp04); __m128 _tmp13b = _mm_sub_ps(_tmp03, _tmp04); __m128 _out00 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp00, _tmp02a), _tmp02b)); __m128 _out01 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v2, _tmp13b, _tmp13a)); __m128 _out02 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v4, _tmp02b, _tmp02a)); __m128 _out03 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v8, _tmp13b, _mm_add_ps(_tmp05, _tmp13a))); _mm_store_ps(output0, _out00); _mm_store_ps(output0 + 4, _out01); _mm_store_ps(output0 + 4 * 2, _out02); _mm_store_ps(output0 + 4 * 3, _out03); output0 += outw * 4; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
GB_unaryop__minv_int32_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_int32_uint16
// op(A') function:  GB_tran__minv_int32_uint16

// C type:   int32_t
// A type:   uint16_t
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = GB_IMINV_SIGNED (aij, 32)

// Element type of the input matrix A.
#define GB_ATYPE \
    uint16_t

// Element type of the output matrix C.
#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: integer multiplicative inverse in signed 32-bit arithmetic.
// NOTE(review): GB_IMINV_SIGNED is defined elsewhere (GB.h); presumably it
// special-cases x == 0 rather than dividing by zero — confirm in GB.h.
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 32) ;

// casting: widen uint16_t to int32_t (always value-preserving)
#define GB_CASTING(z, x) \
    int32_t z = (int32_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT32 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply the operator to each of the anz entries of Ax, writing into Cx.
// Embarrassingly parallel: each entry is independent, static schedule.
GrB_Info GB_unop__minv_int32_uint16
(
    int32_t *restrict Cx,               // output array, anz entries
    const uint16_t *restrict Ax,        // input array, anz entries
    int64_t anz,                        // number of entries
    int nthreads                        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual work is in the included template, which is driven entirely by
// the GB_* macros defined above.
GrB_Info GB_tran__minv_int32_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,                // per-slice row counts (phase 1 output)
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice                         // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pcptdesdecryptcfbcaomp.c
/******************************************************************************* * Copyright 2002-2018 Intel Corporation * All Rights Reserved. * * If this software was obtained under the Intel Simplified Software License, * the following terms apply: * * The source code, information and material ("Material") contained herein is * owned by Intel Corporation or its suppliers or licensors, and title to such * Material remains with Intel Corporation or its suppliers or licensors. The * Material contains proprietary information of Intel or its suppliers and * licensors. The Material is protected by worldwide copyright laws and treaty * provisions. No part of the Material may be used, copied, reproduced, * modified, published, uploaded, posted, transmitted, distributed or disclosed * in any way without Intel's prior express written permission. No license under * any patent, copyright or other intellectual property rights in the Material * is granted to or conferred upon you, either expressly, by implication, * inducement, estoppel or otherwise. Any license under such intellectual * property rights must be express and approved by Intel in writing. * * Unless otherwise agreed by Intel in writing, you may not remove or alter this * notice or any other notice embedded in Materials by Intel or Intel's * suppliers or licensors in any way. * * * If this software was obtained under the Apache License, Version 2.0 (the * "License"), the following terms apply: * * You may not use this file except in compliance with the License. You may * obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 * * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * limitations under the License. 
*******************************************************************************/

/*
// Name:
//    ippsTDESDecryptCFB
//
// Purpose:
//    Cryptography Primitives.
//    Decrypt byte data stream according to TDES.
//
//
*/

#include "owndefs.h"

#if defined ( _OPENMP )

#include "owncp.h"
#include "pcpdes.h"
#include "pcptool.h"
#include "omp.h"

/*F*
// Name:
//    ippsTDESDecryptCFB
//
// Purpose:
//    Decrypt byte data stream according to TDES in CFB mode using OpenMP API.
//
// Returns:
//    ippStsNoErr             No errors, it's OK
//    ippStsNullPtrErr        ( pCtx1 == NULL ) || ( pCtx2 == NULL ) ||
//                            ( pCtx3 == NULL ) || ( pSrc == NULL ) ||
//                            ( pDst == NULL ) || ( pIV == NULL )
//    ippStsLengthErr         srcLen < 1
//    ippStsCFBSizeErr        1 > cfbBlkSize > 8
//    ippStsContextMatchErr   ( pCtx1->idCtx != idCtxDES ) ||
//                            ( pCtx2->idCtx != idCtxDES ) ||
//                            ( pCtx3->idCtx != idCtxDES )
//    ippStsUnderRunErr       ( srcLen % cfBlkSize ) != 0
//
// Parameters:
//    pSrc        Pointer to the input ciphertext data stream.
//    pDst        Pointer to the resulting plaintext data stream.
//    srcLen      Ciphertext data stream length in bytes.
//    cfbBlkSize  CFB block size in bytes (1..8).
//    pCtx        Pointer to the IppsDESSpec context.
//    pIV         Pointer to the initialization vector.
//    padding     Padding scheme indicator (unused here).
//
// Notes:
//    An encryption function is used to decrypt a cipher text,
//    i.e. an encryption key schedule is utilized.
*F*/

/* Sequentially decrypt nBlocks CFB blocks of cfbBlkSize bytes each.
** The 16-byte buffer tmpInp acts as the CFB shift register:
**   tmpInp[0] holds the current 8-byte feedback value fed into TDES;
**   tmpInp[1] stages the incoming ciphertext bytes to be shifted in.
** TDES-EDE forward transform (E(k1), D(k2), E(k3)) is applied to the
** feedback; in CFB mode the same forward transform is used for both
** encryption and decryption. */
static void TDES_CFB_processing(const Ipp8u* pIV,
                                const Ipp8u* pSrc, Ipp8u* pDst,
                                int nBlocks, int cfbBlkSize,
                                const IppsDESSpec* pCtx1,
                                const IppsDESSpec* pCtx2,
                                const IppsDESSpec* pCtx3)
{
   Ipp64u tmpInp[2];
   Ipp64u tmpOut;

   /* copy IV */
   CopyBlock8(pIV, tmpInp);

   /* decrypt data block-by-block of cfbLen each */
   while(nBlocks) {
      int n;

      /* decryption: EDE chain produces the keystream block */
      tmpOut = Cipher_DES(tmpInp[0], DES_EKEYS(pCtx1), DESspbox);
      tmpOut = Cipher_DES(tmpOut,    DES_DKEYS(pCtx2), DESspbox);
      tmpOut = Cipher_DES(tmpOut,    DES_EKEYS(pCtx3), DESspbox);

      /* store output and put feedback into the input buffer (tmpInp):
      ** plaintext = keystream XOR ciphertext; the ciphertext itself is
      ** the next feedback (this is what makes CFB decryption parallelizable) */
      for(n=0; n<cfbBlkSize; n++) {
         ((Ipp8u*)(tmpInp+1))[n] = pSrc[n];
         pDst[n] = (Ipp8u)( ((Ipp8u*)&tmpOut)[n] ^ pSrc[n] );
      }

      /* shift input buffer (tmpInp) for the next CFB operation;
      ** the shift direction depends on the byte order in memory */
      if(MBS_DES==cfbBlkSize)
         tmpInp[0] = tmpInp[1];
      else
         #if (IPP_ENDIAN == IPP_BIG_ENDIAN)
         tmpInp[0] = LSL64(tmpInp[0], cfbBlkSize*8) |LSR64(tmpInp[1], 64-cfbBlkSize*8);
         #else
         tmpInp[0] = LSR64(tmpInp[0], cfbBlkSize*8) |LSL64(tmpInp[1], 64-cfbBlkSize*8);
         #endif

      pSrc += cfbBlkSize;
      pDst += cfbBlkSize;
      nBlocks--;
   }
}

/* Public entry point: validate arguments, then either decrypt serially or
** split the stream over OpenMP threads.  Parallel CFB decryption works
** because each chunk's starting feedback value can be taken directly from
** the ciphertext preceding that chunk. */
IPPFUN(IppStatus, ippsTDESDecryptCFB,(const Ipp8u* pSrc, Ipp8u* pDst, int srcLen, int cfbBlkSize,
                                      const IppsDESSpec* pCtx1,
                                      const IppsDESSpec* pCtx2,
                                      const IppsDESSpec* pCtx3,
                                      const Ipp8u* pIV,
                                      IppsPadding padding))
{
   /* test context */
   IPP_BAD_PTR3_RET(pCtx1, pCtx2, pCtx3);
   /* use aligned DES context */
   pCtx1 = (IppsDESSpec*)(IPP_ALIGNED_PTR(pCtx1, DES_ALIGNMENT));
   pCtx2 = (IppsDESSpec*)(IPP_ALIGNED_PTR(pCtx2, DES_ALIGNMENT));
   pCtx3 = (IppsDESSpec*)(IPP_ALIGNED_PTR(pCtx3, DES_ALIGNMENT));
   IPP_BAD_PTR3_RET(pSrc, pDst, pIV);
   IPP_BADARG_RET(!DES_ID_TEST(pCtx1), ippStsContextMatchErr);
   IPP_BADARG_RET(!DES_ID_TEST(pCtx2), ippStsContextMatchErr);
   IPP_BADARG_RET(!DES_ID_TEST(pCtx3), ippStsContextMatchErr);

   /* test stream length */
   IPP_BADARG_RET((srcLen<1), ippStsLengthErr);
   /* test CFB value */
   IPP_BADARG_RET(((1>cfbBlkSize) || (MBS_DES<cfbBlkSize)), ippStsCFBSizeErr);

   /* test stream integrity */
   IPP_BADARG_RET((srcLen%cfbBlkSize), ippStsUnderRunErr);

   UNREFERENCED_PARAMETER(padding);

   {
      int nBlocks = srcLen / cfbBlkSize;
      /* cap the thread count so each thread gets at least
      ** TDES_MIN_BLK_PER_THREAD blocks of work */
      int nThreads = IPP_MIN(IPPCP_GET_NUM_THREADS(), IPP_MAX(nBlocks/TDES_MIN_BLK_PER_THREAD, 1));

      if(1==nThreads)
         TDES_CFB_processing(pIV, pSrc, pDst, nBlocks, cfbBlkSize, pCtx1, pCtx2, pCtx3);

      else {
         int blksThreadReg;   /* blocks per regular thread */
         int blksThreadTail;  /* blocks for the last thread (takes the remainder) */
         int srcBlkSize;      /* bytes per regular thread */
         int ivBlkSize;       /* bytes of per-thread IV actually copied */

         /* stack storage for per-thread IVs; heap fallback for many threads */
         Ipp8u locIV[MBS_DES*DEFAULT_CPU_NUM];
         #if defined(__INTEL_COMPILER)
         Ipp8u* pLocIV = nThreads>DEFAULT_CPU_NUM? kmp_malloc(nThreads*MBS_DES) : locIV;
         #else
         Ipp8u* pLocIV = nThreads>DEFAULT_CPU_NUM ? malloc(nThreads*MBS_DES) : locIV;
         #endif
         if(pLocIV) {
            #pragma omp parallel IPPCP_OMP_LIMIT_MAX_NUM_THREADS(nThreads)
            {
               /* the master thread computes the work split and gathers each
               ** thread's starting IV from the ciphertext; the explicit
               ** barrier below publishes these shared values (the master
               ** construct itself has no implied barrier) */
               #pragma omp master
               {
                  int nt;
                  nThreads = omp_get_num_threads();
                  blksThreadReg = nBlocks / nThreads;
                  blksThreadTail = blksThreadReg + nBlocks % nThreads;
                  srcBlkSize = blksThreadReg*cfbBlkSize;
                  ivBlkSize = IPP_MIN(MBS_DES,srcBlkSize);
                  CopyBlock8(pIV, pLocIV+0);
                  /* thread nt's IV = last ivBlkSize ciphertext bytes
                  ** preceding its chunk, stored after the 8-byte slot 0 */
                  for(nt=1; nt<nThreads; nt++)
                     CopyBlock(pSrc+nt*srcBlkSize-ivBlkSize, pLocIV+MBS_DES+(nt-1)*ivBlkSize, ivBlkSize);
               }
               #pragma omp barrier
               {
                  int id = omp_get_thread_num();
                  /* NOTE(review): the master stores thread nt's IV at
                  ** pLocIV + MBS_DES + (nt-1)*ivBlkSize, but it is read back
                  ** here at pLocIV + id*ivBlkSize.  The two agree only when
                  ** ivBlkSize == MBS_DES (i.e. srcBlkSize >= 8).  Also,
                  ** TDES_CFB_processing reads a full 8 bytes of IV via
                  ** CopyBlock8.  Presumably srcBlkSize >= MBS_DES always
                  ** holds here given the nBlocks/TDES_MIN_BLK_PER_THREAD
                  ** thread cap — TODO confirm for small cfbBlkSize. */
                  Ipp8u* pThreadIV  = pLocIV + id*ivBlkSize;
                  Ipp8u* pThreadSrc = (Ipp8u*)pSrc + id*srcBlkSize;
                  Ipp8u* pThreadDst = (Ipp8u*)pDst + id*srcBlkSize;
                  int blkThread = (id==(nThreads-1))? blksThreadTail : blksThreadReg;
                  TDES_CFB_processing(pThreadIV, pThreadSrc, pThreadDst, blkThread, cfbBlkSize, pCtx1, pCtx2, pCtx3);
               }
            }
            if(pLocIV != locIV)
               #if defined(__INTEL_COMPILER)
               kmp_free(pLocIV);
               #else
               free(pLocIV);
               #endif
         }
         else
            return ippStsMemAllocErr;
      }

      return ippStsNoErr;
   }
}

#endif /* #ifdef _OPENMP */
GB_binop__first_fp64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__first_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_08__first_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_02__first_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_04__first_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__first_fp64)
// A*D function (colscale):         GB (_AxD__first_fp64)
// D*A function (rowscale):         GB (_DxB__first_fp64)
// C+=B function (dense accum):     GB (_Cdense_accumB__first_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__first_fp64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__first_fp64)
// C=scalar+B                       GB ((none))
// C=scalar+B'                      GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                      GB ((none))

// C type:   double
// A type:   double
// A pattern? 0
// B type:   double
// B pattern? 1

// BinaryOp: cij = aij
// (FIRST operator: the result is the A entry; B contributes only its
// pattern, which is why GB_B_IS_PATTERN is 1 and GB_GETB is a no-op.)

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    double aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB] (no-op: FIRST never reads B's values)
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// true if values of B are not used
#define GB_B_IS_PATTERN \
    1 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: z = first(x,y) = x
#define GB_BINOP(z,x,y,i,j) \
    z = x ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FIRST || GxB_NO_FP64 || GxB_NO_FIRST_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (FIRST is not a valid accumulator here, so this variant is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__first_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__first_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__first_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__first_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__first_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__first_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are used only for eWiseUnion (fill-in values where one
    // matrix has an entry and the other does not)
    double alpha_scalar ;
    double beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((double *) alpha_scalar_in)) ;
        beta_scalar  = (*((double *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__first_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__first_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__first_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__first_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// (compiled out: first(x, bij) == x is a constant, handled elsewhere)

#if 0
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        ;
        ;
        Cx [p] = x ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// (compiled out: first(aij, y) == aij is a plain copy, handled elsewhere)

#if 0
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = aij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    ;                                           \
    ;                                           \
    Cx [pC] = x ;                               \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = aij ;                             \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
pt_to_pt_pingping.c
/***************************************************************************** * * * Mixed-mode OpenMP/MPI MicroBenchmark Suite - Version 1.0 * * * * produced by * * * * Mark Bull, Jim Enright and Fiona Reid * * * * at * * * * Edinburgh Parallel Computing Centre * * * * email: markb@epcc.ed.ac.uk, fiona@epcc.ed.ac.uk * * * * * * Copyright 2012, The University of Edinburgh * * * * * * Licensed under the Apache License, Version 2.0 (the "License"); * * you may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * * * ****************************************************************************/ /*-----------------------------------------------------------*/ /* Contains the point-to-point pingping mixed mode */ /* OpenMP/MPI benchmarks. */ /* This includes: -masteronly pingping */ /* -funnelled pingping */ /* -multiple pingping */ /*-----------------------------------------------------------*/ #include "pt_to_pt_pingping.h" /*-----------------------------------------------------------*/ /* pingPing */ /* */ /* Driver subroutine for the pingping benchmark. */ /*-----------------------------------------------------------*/ int pingPing(int benchmarkType) { int dataSizeIter; int sameNode; pingRankA = PPRanks[0]; pingRankB = PPRanks[1]; /* Check if pingRankA and pingRankB are on the same node */ sameNode = compareProcNames(pingRankA, pingRankB); if (myMPIRank == 0) { /* print message saying if benchmark is inter or intra node */ printNodeReport(sameNode, pingRankA, pingRankB); /* then print report column headings. 
*/ printBenchHeader(); } /* initialise repsToDo to defaultReps at start of benchmark */ repsToDo = defaultReps; /* Loop over data sizes */ dataSizeIter = minDataSize; /* initialise dataSizeIter to minDataSize */ while (dataSizeIter <= maxDataSize) { /* set sizeofBuffer */ sizeofBuffer = dataSizeIter * numThreads; /* Allocate space for main data arrays */ allocatePingpingData(sizeofBuffer); /* warm-up for benchmarkType */ if (benchmarkType == MASTERONLY) { /* Masteronly warmp sweep */ masteronlyPingping(warmUpIters, dataSizeIter); } else if (benchmarkType == FUNNELLED) { /* perform funnelled warm-up sweep */ funnelledPingping(warmUpIters, dataSizeIter); } else if (benchmarkType == MULTIPLE) { multiplePingping(warmUpIters, dataSizeIter); } /* perform verification test for the pingping */ testPingping(sizeofBuffer, dataSizeIter); /* Initialise benchmark */ benchComplete = FALSE; /* keep executing benchmark until target time is reached */ while (benchComplete != TRUE) { /* Start the timer...MPI_Barrier to synchronise */ MPI_Barrier(comm); startTime = MPI_Wtime(); if (benchmarkType == MASTERONLY) { /* execute for repsToDo repetitions */ masteronlyPingping(repsToDo, dataSizeIter); } else if (benchmarkType == FUNNELLED) { funnelledPingping(repsToDo, dataSizeIter); } else if (benchmarkType == MULTIPLE) { multiplePingping(repsToDo, dataSizeIter); } /* Stop the timer...MPI_Barrier to synchronise processes */ MPI_Barrier(comm); finishTime = MPI_Wtime(); totalTime = finishTime - startTime; /* Call repTimeCheck function to test if target time is reached */ if (myMPIRank == 0) { benchComplete = repTimeCheck(totalTime, repsToDo); } /* Ensure all procs have the same value of benchComplete */ /* and repsToDo */ MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm); MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm); } /* Master process sets benchmark results */ if (myMPIRank == 0) { setReportParams(dataSizeIter, repsToDo, totalTime); printReport(); } /* Free the allocated space for the main data 
arrays */ freePingpingData(); /* Update dataSize before the next iteration */ dataSizeIter = dataSizeIter * 2; /* double data size */ } return 0; } /*-----------------------------------------------------------*/ /* masteronlyPingping */ /* */ /* Two processes send a message to each other using the */ /* MPI_Isend, MPI_Recv and MPI_Wait routines. */ /* Inter-process communication takes place outside of the */ /* parallel region. */ /*-----------------------------------------------------------*/ int masteronlyPingping(int totalReps, int dataSize) { int repIter, i; int destRank; /* set destRank to ID of other process */ if (myMPIRank == pingRankA) { destRank = pingRankB; } else if (myMPIRank == pingRankB) { destRank = pingRankA; } for (repIter = 0; repIter < totalReps; repIter++) { if (myMPIRank == pingRankA || myMPIRank == pingRankB) { /* Each thread writes its globalID to pingSendBuf * using a PARALLEL DO directive. */ #pragma omp parallel for default(none) private(i) \ shared(pingSendBuf, dataSize, sizeofBuffer, globalIDarray) \ schedule(static, dataSize) for (i = 0; i < sizeofBuffer; i++) { pingSendBuf[i] = globalIDarray[myThreadID]; } /* Process calls non-bloacking send to start transfer of pingSendBuf * to other process. */ MPI_Isend(pingSendBuf, sizeofBuffer, MPI_INT, destRank, TAG, comm, &requestID); /* Process then waits for message from other process. */ MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, destRank, TAG, comm, &status); /* Finish the Send operation with an MPI_Wait */ MPI_Wait(&requestID, &status); /* Each thread under the MPI process now reads its part of the * received buffer. 
*/ #pragma omp parallel for default(none) private(i) \ shared(finalRecvBuf, dataSize, sizeofBuffer, pingRecvBuf) \ schedule(static, dataSize) for (i = 0; i < sizeofBuffer; i++) { finalRecvBuf[i] = pingRecvBuf[i]; } } } return 0; } /*-----------------------------------------------------------*/ /* funnelledPingPing */ /* */ /* Two processes send a message to each other using the */ /* MPI_Isend, MPI_Recv and MPI_Wait routines. */ /* Inter-process communication takes place inside the */ /* OpenMP parallel region. */ /*-----------------------------------------------------------*/ int funnelledPingping(int totalReps, int dataSize) { int repIter, i; int destRank; /* set destRank to ID of other process */ if (myMPIRank == pingRankA) { destRank = pingRankB; } else if (myMPIRank == pingRankB) { destRank = pingRankA; } /* Open the parallel region */ #pragma omp parallel default(none) private(i, repIter) \ shared(dataSize, sizeofBuffer, pingSendBuf, globalIDarray) \ shared(pingRecvBuf, finalRecvBuf, status, requestID) \ shared(destRank, comm, myMPIRank, pingRankA, pingRankB, totalReps) for (repIter = 0; repIter < totalReps; repIter++) { if (myMPIRank == pingRankA || myMPIRank == pingRankB) { /* Each thread writes its globalID to its part of * pingSendBuf. 
*/ #pragma omp for schedule(static, dataSize) for (i = 0; i < sizeofBuffer; i++) { pingSendBuf[i] = globalIDarray[myThreadID]; } /* Implicit barrier here takes care of necessary synchronisation */ #pragma omp master { /* Master thread starts send of buffer */ MPI_Isend(pingSendBuf, sizeofBuffer, MPI_INT, destRank, TAG, comm, &requestID); /* then waits for message from other process */ MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, destRank, TAG, comm, &status); /* Master thread then completes send using an MPI_Wait */ MPI_Wait(&requestID, &status); } /* Barrier needed to ensure master thread has completed transfer */ #pragma omp barrier /* Each thread reads its part of the received buffer */ #pragma omp for schedule(static, dataSize) for (i = 0; i < sizeofBuffer; i++) { finalRecvBuf[i] = pingRecvBuf[i]; } } } return 0; } /*-----------------------------------------------------------*/ /* multiplePingping */ /* */ /* With this algorithm multiple threads take place in the */ /* communication and computation. */ /* Each thread sends its portion of the pingSendBuf to the */ /* other process using MPI_Isend/ MPI_Recv/ MPI_Wait */ /* routines. */ /*-----------------------------------------------------------*/ int multiplePingping(int totalReps, int dataSize) { int repIter, i; int destRank; int lBound; /* set destRank to ID of other process */ if (myMPIRank == pingRankA) { destRank = pingRankB; } else if (myMPIRank == pingRankB) { destRank = pingRankA; } /* Open parallel region */ #pragma omp parallel default(none) private(i, lBound, requestID, status, \ repIter) \ shared(pingSendBuf, pingRecvBuf, finalRecvBuf, sizeofBuffer) \ shared(destRank, myMPIRank, pingRankA, pingRankB, totalReps) \ shared(dataSize, globalIDarray, comm) { for (repIter = 0; repIter < totalReps; repIter++) { if (myMPIRank == pingRankA || myMPIRank == pingRankB) { /* Calculate the lower bound of each threads * portion of the data arrays. 
*/ lBound = (myThreadID * dataSize); /* Each thread writes to its part of pingSendBuf */ #pragma omp for nowait schedule(static, dataSize) for (i = 0; i < sizeofBuffer; i++) { pingSendBuf[i] = globalIDarray[myThreadID]; } /* Each thread starts send of dataSize items of * pingSendBuf to process with rank = destRank. */ MPI_Isend(&pingSendBuf[lBound], dataSize, MPI_INT, destRank, myThreadID, comm, &requestID); /* Thread then waits for message from destRank with * tag equal to it thread id. */ MPI_Recv(&pingRecvBuf[lBound], dataSize, MPI_INT, destRank, myThreadID, comm, &status); /* Thread completes send using MPI_Wait */ MPI_Wait(&requestID, &status); /* Each thread reads its part of received buffer. */ #pragma omp for nowait schedule(static, dataSize) for (i = 0; i < sizeofBuffer; i++) { finalRecvBuf[i] = pingRecvBuf[i]; } } } } return 0; } /*-----------------------------------------------------------*/ /* allocatePingpingData */ /* */ /* Allocates space for the main data arrays. */ /* Size of each array is specified by subroutine argument. */ /*-----------------------------------------------------------*/ int allocatePingpingData(int sizeofBuffer) { pingSendBuf = (int *)malloc(sizeofBuffer * sizeof(int)); pingRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int)); finalRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int)); return 0; } /*-----------------------------------------------------------*/ /* freePingpingData */ /* */ /* Deallocates the storage space for the main data arrays. */ /*-----------------------------------------------------------*/ int freePingpingData() { free(pingSendBuf); free(pingRecvBuf); free(finalRecvBuf); return 0; } /*-----------------------------------------------------------*/ /* testPingping */ /* */ /* Verifies that the PingPing benchmark worked correctly. 
*/
/*-----------------------------------------------------------*/
/* testPingping                                              */
/*                                                           */
/* Verifies the PingPing result: rebuilds the expected       */
/* receive buffer locally, compares it element-by-element    */
/* with finalRecvBuf, and ANDs the per-rank verdicts onto    */
/* rank 0.                                                   */
/*-----------------------------------------------------------*/
int testPingping(int sizeofBuffer, int dataSize)
{
  int otherPingRank, i, testFlag, reduceFlag;
  int *testBuf;

  /* initialise testFlag to true (test passed) */
  testFlag = TRUE;

  /* Testing only needs to be done by pingRankA & pingRankB */
  if (myMPIRank == pingRankA || myMPIRank == pingRankB) {
    /* allocate space for testBuf */
    testBuf = (int *)malloc(sizeofBuffer * sizeof(int));

    /* set the ID of other pingRank */
    if (myMPIRank == pingRankA) {
      otherPingRank = pingRankB;
    } else if (myMPIRank == pingRankB) {
      otherPingRank = pingRankA;
    }

    /* construct testBuf array with correct values.
     * These are the values that should be in finalRecvBuf.
     * NOTE(review): myThreadID is read inside a default(none) region but
     * is not in the shared list -- it is presumably declared threadprivate
     * elsewhere in this benchmark; confirm against the declaring file.
     */
#pragma omp parallel for default(none) private(i) \
    shared(otherPingRank, numThreads, testBuf, dataSize, sizeofBuffer) \
    schedule(static, dataSize)
    for (i = 0; i < sizeofBuffer; i++) {
      /* calculate globalID of thread expected in finalRecvBuf
       * This is done by using otherPingRank
       */
      testBuf[i] = (otherPingRank * numThreads) + myThreadID;
    }

    /* compare each element of testBuf and finalRecvBuf */
    for (i = 0; i < sizeofBuffer; i++) {
      if (testBuf[i] != finalRecvBuf[i]) {
        testFlag = FALSE;
      }
    }

    /* free space for testBuf */
    free(testBuf);
  }

  /* Logical AND of every rank's testFlag; the combined verdict lands on rank 0. */
  MPI_Reduce(&testFlag, &reduceFlag, 1, MPI_INT, MPI_LAND, 0, comm);

  /* Master process sets the testOutcome using testFlag. */
  if (myMPIRank == 0) {
    setTestOutcome(reduceFlag);
  }

  return 0;
}
DRB016-outputdep-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
/* The loop in this example cannot be parallelized.
This pattern has two pair of dependencies:
1. loop carried output dependence
 x = .. :
2. loop carried true dependence due to:
 .. = x;
 x = ..;
Data race pairs: we allow two pairs to preserve the original code pattern.
 1. x@73:12 vs. x@74:5
 2. x@74:5 vs. x@74:5
*/

#include "omprace.h"
#include <omp.h>
#include <stdio.h>

/* Shared output array written by every thread. */
int a[100];

/* DataRaceBench case DRB016: the races below are INTENTIONAL and are the
 * subject of the benchmark -- do not "fix" them by privatizing x or adding
 * synchronization, as that would invalidate the test. */
int main()
{
  omprace_init();
  int len=100;
  int i,x=10;
  #pragma omp parallel for
  for (i=0;i<len;i++) {
    a[i] = x;   /* read of shared x races with the write below (true dep.) */
    x=i;        /* unsynchronized write to shared x (output dep.) */
  }
  /* Value of x here is nondeterministic by design. */
  printf("x=%d",x);
  omprace_fini();
  return 0;
}
symm_x_coo_u_lo_row.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"

#define CACHELINE 64

/* Multi-column symmetric SpMM for a COO matrix stored as its lower
 * triangle with a UNIT diagonal (per the _u_lo naming -- confirm against
 * the dispatch table):  Y := alpha*A*X + beta*Y, row-major dense operands.
 *
 * Stage 1 applies the unit-diagonal term: y = beta*y + alpha*x, elementwise.
 * Stage 2 applies every strictly-lower nonzero (ac < r) twice, once for
 * (r,ac) and once for the mirrored (ac,r) entry.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
    ALPHA_INT m = mat->rows;
    ALPHA_INT n = columns;
    ALPHA_INT num_threads = alpha_get_thread_num();

    /* Stage 1: y[r][c] = beta*y[r][c] + alpha*x[r][c] (unit diagonal).
     * Parallel over rows; each (r,c) is written by exactly one thread. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT r = 0; r < m; ++r)
        for (ALPHA_INT c = 0; c < n; c++)
        {
            ALPHA_Number tmp1, tmp2;
            alpha_mul(tmp1, y[index2(r, c, ldy)], beta);
            alpha_mul(tmp2, x[index2(r, c, ldx)], alpha);
            alpha_add(y[index2(r, c, ldy)], tmp1, tmp2);
        }

    /* Stage 2: partition the dense columns into cache-line-sized blocks and
     * give each thread a disjoint block range.  Because a symmetric entry
     * updates two different ROWS of y, partitioning by COLUMN is what keeps
     * the writes of different threads disjoint (no race, and no false
     * sharing within a cache line). */
    ALPHA_INT block_size = CACHELINE / sizeof(ALPHA_Number);
    ALPHA_INT block_num = (columns + block_size - 1) / block_size;
    /* Never run more threads than there are column blocks to own. */
    if (num_threads > block_num)
        num_threads = block_num;

#ifdef _OPENMP
#pragma omp parallel num_threads(num_threads)
#endif
    {
        ALPHA_INT tid = alpha_get_thread_id();
        /* [bcl, bch) = this thread's private column range. */
        ALPHA_INT bcl = cross_block_low(tid, num_threads, block_num) * block_size;
        ALPHA_INT bch = cross_block_high(tid, num_threads, block_num) * block_size;
        if (bch > columns)
            bch = columns;

        /* Every thread scans ALL nonzeros but only touches its own columns. */
        for (ALPHA_INT ai = 0; ai < mat->nnz; ai++)
        {
            ALPHA_INT ac = mat->col_indx[ai];
            ALPHA_INT r = mat->row_indx[ai];
            if (ac < r)  /* strictly-lower entries only; diagonal handled in stage 1 */
            {
                ALPHA_Number val;
                alpha_mul(val, alpha, mat->values[ai]);
                /* (r, ac) entry ... */
                for (ALPHA_INT c = bcl; c < bch; ++c)
                    alpha_madde(y[index2(r, c, ldy)], val, x[index2(ac, c, ldx)]);
                /* ... and its symmetric mirror (ac, r). */
                for (ALPHA_INT c = bcl; c < bch; ++c)
                    alpha_madde(y[index2(ac, c, ldy)], val, x[index2(r, c, ldx)]);
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
opi.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> int main(int argc, char **argv) { //seed random number generator // Q2b: get the number of threads to run with from agrv and // add OpenMP API code to set number of threads here int Nthreads = atoi(argv[1]); omp_set_num_threads(Nthreads); struct drand48_data *drandData; drandData = (struct drand48_data*) malloc(Nthreads*sizeof(struct drand48_data)); // Q2c: add an OpenMP parallel region here, wherein each thread initializes // one entry in drandData using srand48_r and seed based on thread number #pragma omp parallel { long int seed = omp_get_thread_num(); int rank = omp_get_thread_num(); srand48_r(seed, drandData+rank); } long long int Ntrials = 10000000; //need running tallies long long int Ntotal=0; long long int Ncircle=0; double time = omp_get_wtime(); #pragma omp parallel for reduction(+:Ncircle) reduction(+:Ntotal) for (long long int n=0; n<Ntrials; n++) { double rand1; double rand2; int rank = omp_get_thread_num(); //generate two random numbers (use the thread id to offset drandData) drand48_r(drandData+rank, &rand1); drand48_r(drandData+rank, &rand2); double x = -1 + 2*rand1; //shift to [-1,1] double y = -1 + 2*rand2; //check if its in the circle if (sqrt(x*x+y*y)<=1) Ncircle++; Ntotal++; if (n%100 ==0) { double pi = 4.0*Ncircle/ (double) (n); //printf("Our estimate of pi is %g \n", pi); } } double pi = 4.0*Ncircle/ (double) (Ntotal); time = omp_get_wtime() - time; printf("Our final estimate of pi is %lf, it took %lf seconds. \n", pi, time); free(drandData); return 0; }
rom_builder_and_solver.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // Raul Bravo // #if !defined(KRATOS_ROM_BUILDER_AND_SOLVER) #define KRATOS_ROM_BUILDER_AND_SOLVER /* System includes */ /* External includes */ /* Project includes */ #include "includes/define.h" #include "includes/model_part.h" #include "solving_strategies/schemes/scheme.h" #include "solving_strategies/builder_and_solvers/builder_and_solver.h" /* Application includes */ #include "rom_application_variables.h" namespace Kratos { template <class TSparseSpace, class TDenseSpace, // = DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class ROMBuilderAndSolver : public BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> { public: /** * This struct is used in the component wise calculation only * is defined here and is used to declare a member variable in the component wise builder and solver * private pointers can only be accessed by means of set and get functions * this allows to set and not copy the Element_Variables and Condition_Variables * which will be asked and set by another strategy object */ //pointer definition KRATOS_CLASS_POINTER_DEFINITION(ROMBuilderAndSolver); // The size_t types typedef std::size_t SizeType; typedef std::size_t IndexType; /// Definition of the classes from the base class typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef 
typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef typename BaseType::NodesArrayType NodesArrayType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename BaseType::ConditionsArrayType ConditionsArrayType; /// Additional definitions typedef PointerVectorSet<Element, IndexedObject> ElementsContainerType; typedef Element::EquationIdVectorType EquationIdVectorType; typedef Element::DofsVectorType DofsVectorType; typedef boost::numeric::ublas::compressed_matrix<double> CompressedMatrixType; /// DoF types definition typedef Node<3> NodeType; typedef typename NodeType::DofType DofType; typedef typename DofType::Pointer DofPointerType; /*@} */ /**@name Life Cycle */ /*@{ */ /** * @brief Default constructor. (with parameters) */ explicit ROMBuilderAndSolver(typename TLinearSolver::Pointer pNewLinearSystemSolver, Parameters ThisParameters) : BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSystemSolver) { // Validate default parameters Parameters default_parameters = Parameters(R"( { "nodal_unknowns" : [], "number_of_rom_dofs" : 10 })"); ThisParameters.ValidateAndAssignDefaults(default_parameters); // We set the other member variables mpLinearSystemSolver = pNewLinearSystemSolver; mNodalVariablesNames = ThisParameters["nodal_unknowns"].GetStringArray(); mNodalDofs = mNodalVariablesNames.size(); mRomDofs = ThisParameters["number_of_rom_dofs"].GetInt(); // Setting up mapping: VARIABLE_KEY --> CORRECT_ROW_IN_BASIS for(int k=0; k<mNodalDofs; k++){ if(KratosComponents<Variable<double>>::Has(mNodalVariablesNames[k])) { const auto& var = KratosComponents<Variable<double>>::Get(mNodalVariablesNames[k]); mMapPhi[var.Key()] = k; } else KRATOS_ERROR << "variable \""<< mNodalVariablesNames[k] << "\" not valid" << std::endl; } } /** Destructor. 
*/ ~ROMBuilderAndSolver() = default; virtual void SetUpDofSet( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart) override { KRATOS_TRY; KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Setting up the dofs" << std::endl; //Gets the array of elements from the modeler auto &r_elements_array = rModelPart.Elements(); const int number_of_elements = static_cast<int>(r_elements_array.size()); DofsVectorType dof_list, second_dof_list; // NOTE: The second dof list is only used on constraints to include master/slave relations unsigned int nthreads = OpenMPUtils::GetNumThreads(); typedef std::unordered_set<NodeType::DofType::Pointer, DofPointerHasher> set_type; KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() > 2)) << "Number of threads" << nthreads << "\n" << std::endl; KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() > 2)) << "Initializing element loop" << std::endl; /** * Here we declare three sets. 
* - The global set: Contains all the DoF of the system * - The slave set: The DoF that are not going to be solved, due to MPC formulation */ set_type dof_global_set; dof_global_set.reserve(number_of_elements * 20); if (mHromSimulation == false && mTimeStep == 0){ int number_of_hrom_elements=0; #pragma omp parallel firstprivate(dof_list, second_dof_list) reduction(+:number_of_hrom_elements) { const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // We create the temporal set and we reserve some space on them set_type dofs_tmp_set; dofs_tmp_set.reserve(20000); // Gets the array of elements from the modeler ModelPart::ElementsContainerType selected_elements_private; #pragma omp for schedule(guided, 512) nowait for (int i = 0; i < number_of_elements; ++i) { auto it_elem = r_elements_array.begin() + i; //detect whether the element has a Hyperreduced Weight (H-ROM simulation) or not (ROM simulation) if ((it_elem)->Has(HROM_WEIGHT)){ selected_elements_private.push_back(*it_elem.base()); number_of_hrom_elements++; } else it_elem->SetValue(HROM_WEIGHT, 1.0); // Gets list of Dof involved on every element pScheme->GetDofList(*it_elem, dof_list, r_current_process_info); dofs_tmp_set.insert(dof_list.begin(), dof_list.end()); } // Gets the array of conditions from the modeler ConditionsArrayType &r_conditions_array = rModelPart.Conditions(); const int number_of_conditions = static_cast<int>(r_conditions_array.size()); ModelPart::ConditionsContainerType selected_conditions_private; #pragma omp for schedule(guided, 512) nowait for (int i = 0; i < number_of_conditions; ++i) { auto it_cond = r_conditions_array.begin() + i; // Gather the H-reduced conditions that are to be considered for assembling. 
Ignoring those for displaying results only if (it_cond->Has(HROM_WEIGHT)){ selected_conditions_private.push_back(*it_cond.base()); number_of_hrom_elements++; } else it_cond->SetValue(HROM_WEIGHT, 1.0); // Gets list of Dof involved on every element pScheme->GetDofList(*it_cond, dof_list, r_current_process_info); dofs_tmp_set.insert(dof_list.begin(), dof_list.end()); } #pragma omp critical { for (auto &cond : selected_conditions_private){ mSelectedConditions.push_back(&cond); } for (auto &elem : selected_elements_private){ mSelectedElements.push_back(&elem); } } // Gets the array of constraints from the modeler auto &r_constraints_array = rModelPart.MasterSlaveConstraints(); const int number_of_constraints = static_cast<int>(r_constraints_array.size()); #pragma omp for schedule(guided, 512) nowait for (int i = 0; i < number_of_constraints; ++i) { auto it_const = r_constraints_array.begin() + i; // Gets list of Dof involved on every element it_const->GetDofList(dof_list, second_dof_list, r_current_process_info); dofs_tmp_set.insert(dof_list.begin(), dof_list.end()); dofs_tmp_set.insert(second_dof_list.begin(), second_dof_list.end()); } // We merge all the sets in one thread #pragma omp critical { dof_global_set.insert(dofs_tmp_set.begin(), dofs_tmp_set.end()); } } if (number_of_hrom_elements>0){ mHromSimulation = true; } } else{ #pragma omp parallel firstprivate(dof_list, second_dof_list) { const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // We cleate the temporal set and we reserve some space on them set_type dofs_tmp_set; dofs_tmp_set.reserve(20000); // Gets the array of elements from the modeler #pragma omp for schedule(guided, 512) nowait for (int i = 0; i < number_of_elements; ++i) { auto it_elem = r_elements_array.begin() + i; // Gets list of Dof involved on every element pScheme->GetDofList(*it_elem, dof_list, r_current_process_info); dofs_tmp_set.insert(dof_list.begin(), dof_list.end()); } // Gets the array of conditions from the 
modeler ConditionsArrayType& r_conditions_array = rModelPart.Conditions(); const int number_of_conditions = static_cast<int>(r_conditions_array.size()); #pragma omp for schedule(guided, 512) nowait for (int i = 0; i < number_of_conditions; ++i) { auto it_cond = r_conditions_array.begin() + i; // Gets list of Dof involved on every element pScheme->GetDofList(*it_cond, dof_list, r_current_process_info); dofs_tmp_set.insert(dof_list.begin(), dof_list.end()); } // Gets the array of constraints from the modeler auto& r_constraints_array = rModelPart.MasterSlaveConstraints(); const int number_of_constraints = static_cast<int>(r_constraints_array.size()); #pragma omp for schedule(guided, 512) nowait for (int i = 0; i < number_of_constraints; ++i) { auto it_const = r_constraints_array.begin() + i; // Gets list of Dof involved on every element it_const->GetDofList(dof_list, second_dof_list, r_current_process_info); dofs_tmp_set.insert(dof_list.begin(), dof_list.end()); dofs_tmp_set.insert(second_dof_list.begin(), second_dof_list.end()); } // We merge all the sets in one thread #pragma omp critical { dof_global_set.insert(dofs_tmp_set.begin(), dofs_tmp_set.end()); } } } KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() > 2)) << "Initializing ordered array filling\n" << std::endl; DofsArrayType Doftemp; BaseType::mDofSet = DofsArrayType(); Doftemp.reserve(dof_global_set.size()); for (auto it = dof_global_set.begin(); it != dof_global_set.end(); it++) { Doftemp.push_back(*it); } Doftemp.Sort(); BaseType::mDofSet = Doftemp; //Throws an exception if there are no Degrees Of Freedom involved in the analysis KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" 
<< std::endl; KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() > 2)) << "Number of degrees of freedom:" << BaseType::mDofSet.size() << std::endl; BaseType::mDofSetIsInitialized = true; if (BaseType::mDofSetIsInitialized ==true) mTimeStep++; KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished setting up the dofs" << std::endl; KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() > 2)) << "End of setup dof set\n" << std::endl; #ifdef KRATOS_DEBUG // If reactions are to be calculated, we check if all the dofs have reactions defined // This is tobe done only in debug mode if (BaseType::GetCalculateReactionsFlag()) { for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) { KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " << std::endl << "Node : " << dof_iterator->Id() << std::endl << "Dof : " << (*dof_iterator) << std::endl << "Not possible to calculate reactions." 
<< std::endl; } } #endif KRATOS_CATCH(""); } /** organises the dofset in order to speed up the building phase */ virtual void SetUpSystem( ModelPart &r_model_part ) override { //int free_id = 0; BaseType::mEquationSystemSize = BaseType::mDofSet.size(); int ndofs = static_cast<int>(BaseType::mDofSet.size()); #pragma omp parallel for firstprivate(ndofs) for (int i = 0; i < static_cast<int>(ndofs); i++){ typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + i; dof_iterator->SetEquationId(i); } } // Vector ProjectToReducedBasis( // const TSystemVectorType& rX, // ModelPart::NodesContainerType& rNodes // ) // { // Vector rom_unknowns = ZeroVector(mRomDofs); // for(const auto& node : rNodes) // { // unsigned int node_aux_id = node.GetValue(AUX_ID); // const auto& nodal_rom_basis = node.GetValue(ROM_BASIS); // for (int i = 0; i < mRomDofs; ++i) { // for (int j = 0; j < mNodalDofs; ++j) { // rom_unknowns[i] += nodal_rom_basis(j, i)*rX(node_aux_id*mNodalDofs + j); // } // } // } // return rom_unknowns; // } void ProjectToFineBasis( const TSystemVectorType &rRomUnkowns, ModelPart &rModelPart, TSystemVectorType &Dx) { const auto dofs_begin = BaseType::mDofSet.begin(); const auto dofs_number = BaseType::mDofSet.size(); #pragma omp parallel firstprivate(dofs_begin, dofs_number) { const Matrix *pcurrent_rom_nodal_basis = nullptr; unsigned int old_dof_id; #pragma omp for nowait for (unsigned int k = 0; k<dofs_number; k++){ auto dof = dofs_begin + k; if(pcurrent_rom_nodal_basis == nullptr){ pcurrent_rom_nodal_basis = &(rModelPart.pGetNode(dof->Id())->GetValue(ROM_BASIS)); old_dof_id = dof->Id(); } else if(dof->Id() != old_dof_id ){ pcurrent_rom_nodal_basis = &(rModelPart.pGetNode(dof->Id())->GetValue(ROM_BASIS)); old_dof_id = dof->Id(); } Dx[dof->EquationId()] = inner_prod( row( *pcurrent_rom_nodal_basis , mMapPhi[dof->GetVariable().Key()] ) , rRomUnkowns); } } } void GetPhiElemental( Matrix &PhiElemental, const Element::DofsVectorType &dofs, const 
Element::GeometryType &geom) { const Matrix *pcurrent_rom_nodal_basis = nullptr; int counter = 0; for(unsigned int k = 0; k < dofs.size(); ++k){ auto variable_key = dofs[k]->GetVariable().Key(); if(k==0) pcurrent_rom_nodal_basis = &(geom[counter].GetValue(ROM_BASIS)); else if(dofs[k]->Id() != dofs[k-1]->Id()){ counter++; pcurrent_rom_nodal_basis = &(geom[counter].GetValue(ROM_BASIS)); } if (dofs[k]->IsFixed()) noalias(row(PhiElemental, k)) = ZeroVector(PhiElemental.size2()); else noalias(row(PhiElemental, k)) = row(*pcurrent_rom_nodal_basis, mMapPhi[variable_key]); } } /*@{ */ /** Function to perform the building and solving phase at the same time. It is ideally the fastest and safer function to use when it is possible to solve just after building */ virtual void BuildAndSolve( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { //define a dense matrix to hold the reduced problem Matrix Arom = ZeroMatrix(mRomDofs, mRomDofs); Vector brom = ZeroVector(mRomDofs); TSystemVectorType x(Dx.size()); double project_to_reduced_start = OpenMPUtils::GetCurrentTime(); Vector xrom = ZeroVector(mRomDofs); //this->ProjectToReducedBasis(x, rModelPart.Nodes(),xrom); const double project_to_reduced_end = OpenMPUtils::GetCurrentTime(); KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Project to reduced basis time: " << project_to_reduced_end - project_to_reduced_start << std::endl; //build the system matrix by looping over elements and conditions and assembling to A KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" 
<< std::endl; // Getting the elements from the model auto help_nelements = static_cast<int>(rModelPart.Elements().size()); auto help_el_begin = rModelPart.ElementsBegin(); const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); auto help_cond_begin = rModelPart.ConditionsBegin(); auto help_nconditions = static_cast<int>(rModelPart.Conditions().size()); if ( mHromSimulation == true){ // In case using the full modelpart, but only a set of selected elemets help_el_begin = mSelectedElements.begin(); help_nelements = static_cast<int>(mSelectedElements.size()); // Only selected conditions are considered for the calculation on an H-ROM simualtion. help_cond_begin = mSelectedConditions.begin(); help_nconditions = static_cast<int>(mSelectedConditions.size()); } // Getting the array of elements const auto nelements = help_nelements; const auto el_begin = help_el_begin; // Getting the array of the conditions const auto cond_begin = help_cond_begin; const auto nconditions = help_nconditions; //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different terms Element::EquationIdVectorType EquationId; // assemble all elements double start_build = OpenMPUtils::GetCurrentTime(); #pragma omp parallel firstprivate(nelements, nconditions, LHS_Contribution, RHS_Contribution, EquationId, el_begin, cond_begin) { Matrix PhiElemental; Matrix tempA = ZeroMatrix(mRomDofs,mRomDofs); Vector tempb = ZeroVector(mRomDofs); Matrix aux; #pragma omp for nowait for (int k = 0; k < nelements; k++) { auto it_el = el_begin + k; //detect if the element is active or not. 
If the user did not make any choice the element //is active by default bool element_is_active = true; if ((it_el)->IsDefined(ACTIVE)){ element_is_active = (it_el)->Is(ACTIVE); } //KRATOS_WATCH("\n\nentered elements loop\n\n") if (element_is_active){ //calculate elemental contribution pScheme->CalculateSystemContributions(*it_el, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo); Element::DofsVectorType dofs; it_el->GetDofList(dofs, CurrentProcessInfo); const auto &geom = it_el->GetGeometry(); if(PhiElemental.size1() != dofs.size() || PhiElemental.size2() != mRomDofs) PhiElemental.resize(dofs.size(), mRomDofs,false); if(aux.size1() != dofs.size() || aux.size2() != mRomDofs) aux.resize(dofs.size(), mRomDofs,false); GetPhiElemental(PhiElemental, dofs, geom); noalias(aux) = prod(LHS_Contribution, PhiElemental); double h_rom_weight = it_el->GetValue(HROM_WEIGHT); noalias(tempA) += prod(trans(PhiElemental), aux) * h_rom_weight; noalias(tempb) += prod(trans(PhiElemental), RHS_Contribution) * h_rom_weight; } } #pragma omp for nowait for (int k = 0; k < nconditions; k++){ auto it = cond_begin + k; //detect if the element is active or not. 
If the user did not make any choice the condition //is active by default bool condition_is_active = true; if ((it)->IsDefined(ACTIVE)){ condition_is_active = (it)->Is(ACTIVE); } //KRATOS_WATCH("\n\nentered conditions loop\n\n") if (condition_is_active){ Condition::DofsVectorType dofs; it->GetDofList(dofs, CurrentProcessInfo); //calculate elemental contribution pScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo); const auto &geom = it->GetGeometry(); if(PhiElemental.size1() != dofs.size() || PhiElemental.size2() != mRomDofs) PhiElemental.resize(dofs.size(), mRomDofs,false); if(aux.size1() != dofs.size() || aux.size2() != mRomDofs) aux.resize(dofs.size(), mRomDofs,false); GetPhiElemental(PhiElemental, dofs, geom); noalias(aux) = prod(LHS_Contribution, PhiElemental); double h_rom_weight = it->GetValue(HROM_WEIGHT); noalias(tempA) += prod(trans(PhiElemental), aux) * h_rom_weight; noalias(tempb) += prod(trans(PhiElemental), RHS_Contribution) * h_rom_weight; } } #pragma omp critical { noalias(Arom) +=tempA; noalias(brom) +=tempb; } } const double stop_build = OpenMPUtils::GetCurrentTime(); KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Build time: " << stop_build - start_build << std::endl; KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished parallel building" << std::endl; //solve for the rom unkowns dunk = Arom^-1 * brom Vector dxrom(xrom.size()); double start_solve = OpenMPUtils::GetCurrentTime(); MathUtils<double>::Solve(Arom, dxrom, brom); const double stop_solve = OpenMPUtils::GetCurrentTime(); KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Solve reduced system time: " << stop_solve - start_solve << std::endl; // //update database // noalias(xrom) += dxrom; //KRATOS_WATCH(dxrom) // project reduced 
solution back to full order model double project_to_fine_start = OpenMPUtils::GetCurrentTime(); ProjectToFineBasis(dxrom, rModelPart, Dx); const double project_to_fine_end = OpenMPUtils::GetCurrentTime(); KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Project to fine basis time: " << project_to_fine_end - project_to_fine_start << std::endl; } void ResizeAndInitializeVectors( typename TSchemeType::Pointer pScheme, TSystemMatrixPointerType &pA, TSystemVectorPointerType &pDx, TSystemVectorPointerType &pb, ModelPart &rModelPart) override { KRATOS_TRY if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0)); pA.swap(pNewA); } if (pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0)); pDx.swap(pNewDx); } if (pb == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0)); pb.swap(pNewb); } TSystemVectorType &Dx = *pDx; TSystemVectorType &b = *pb; if (Dx.size() != BaseType::mEquationSystemSize) Dx.resize(BaseType::mEquationSystemSize, false); if (b.size() != BaseType::mEquationSystemSize) b.resize(BaseType::mEquationSystemSize, false); KRATOS_CATCH("") } /*@} */ /**@name Operations */ /*@{ */ /*@} */ /**@name Access */ /*@{ */ /*@} */ /**@name Inquiry */ /*@{ */ ///@} ///@name Input and output ///@{ /// Turn back information as a string. virtual std::string Info() const override { return "ROMBuilderAndSolver"; } /// Print information about this object. virtual void PrintInfo(std::ostream &rOStream) const override { rOStream << Info(); } /// Print object's data. 
virtual void PrintData(std::ostream &rOStream) const override { rOStream << Info(); } /*@} */ /**@name Friends */ /*@{ */ /*@} */ protected: /**@name Protected static Member Variables */ /*@{ */ /*@} */ /**@name Protected member Variables */ /*@{ */ /** Pointer to the Model. */ typename TLinearSolver::Pointer mpLinearSystemSolver; //DofsArrayType mDofSet; std::vector<DofPointerType> mDofList; bool mReshapeMatrixFlag = false; /// flag taking care if the dof set was initialized ot not bool mDofSetIsInitialized = false; /// flag taking in account if it is needed or not to calculate the reactions bool mCalculateReactionsFlag = false; /// number of degrees of freedom of the problem to be solve unsigned int mEquationSystemSize; /*@} */ /**@name Protected Operators*/ /*@{ */ int mEchoLevel = 0; TSystemVectorPointerType mpReactionsVector; std::vector<std::string> mNodalVariablesNames; int mNodalDofs; unsigned int mRomDofs; std::unordered_map<Kratos::VariableData::KeyType,int> mMapPhi; ModelPart::ElementsContainerType mSelectedElements; ModelPart::ConditionsContainerType mSelectedConditions; bool mHromSimulation = false; int mTimeStep = 0; /*@} */ /**@name Protected Operations*/ /*@{ */ /*@} */ /**@name Protected Access */ /*@{ */ /*@} */ /**@name Protected Inquiry */ /*@{ */ /*@} */ /**@name Protected LifeCycle */ /*@{ */ /*@} */ private: /**@name Static Member Variables */ /*@{ */ /*@} */ /**@name Member Variables */ /*@{ */ /*@} */ /**@name Private Operators*/ /*@{ */ /*@} */ /**@name Private Operations*/ /*@{ */ /*@} */ /**@name Private Access */ /*@{ */ /*@} */ /**@name Private Inquiry */ /*@{ */ /*@} */ /**@name Un accessible methods */ /*@{ */ /*@} */ }; /* Class ROMBuilderAndSolver */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ } /* namespace Kratos.*/ #endif /* KRATOS_ROM_BUILDER_AND_SOLVER defined */
rose_slowInput.c
#include "omp.h"

typedef double real8;

/************************************************************************
 * Function : StressZero
 *
 * Purpose  : Zero the six stress components of entry i whenever its
 *            zone has no shear modulus, its stress magnitude fun2j[i]
 *            is below the cutoff, or its volume vc[i] reached eosvmax.
 *            Afterwards, flush any component whose magnitude is below
 *            stresscut * 1e-20 to exactly zero (denormal protection).
 ************************************************************************/
void StressZero(real8 *newSxx, real8 *newSyy, real8 *newSzz,
                real8 *newTxy, real8 *newTxz, real8 *newTyz,
                const real8 *fun2j, const real8 *shearMod,
                real8 eosvmax, real8 stresscut,
                const int *zoneset, const real8 *vc, int length)
{
   int i;
   int index;

   /* This value 1.e-20 is used to prevent underflow. It is NOT a
      cutoff. DO NOT TOUCH THIS VALUE. */
   real8 stress2 = stresscut * 1.e-20;
   real8 nstres2 = -stress2;

#pragma omp parallel for private (index,i) firstprivate (length,stress2)
   for (i = 0; i <= length - 1; i += 1) {
      /* zone this entry maps to; use the private copy instead of
         re-reading zoneset[i] (it was previously computed but unused) */
      index = zoneset[i];
      if (shearMod[index] == 0.0 || fun2j[i] < stresscut || vc[i] >= eosvmax) {
         newSxx[i] = 0.0;
         newSyy[i] = 0.0;
         newSzz[i] = 0.0;
         newTxy[i] = 0.0;
         newTxz[i] = 0.0;
         newTyz[i] = 0.0;
      }
      /* flush tiny leftovers to exactly zero */
      if (newSxx[i] < stress2 && newSxx[i] > nstres2) newSxx[i] = 0.;
      if (newSyy[i] < stress2 && newSyy[i] > nstres2) newSyy[i] = 0.;
      if (newSzz[i] < stress2 && newSzz[i] > nstres2) newSzz[i] = 0.;
      if (newTxy[i] < stress2 && newTxy[i] > nstres2) newTxy[i] = 0.;
      if (newTxz[i] < stress2 && newTxz[i] > nstres2) newTxz[i] = 0.;
      if (newTyz[i] < stress2 && newTyz[i] > nstres2) newTyz[i] = 0.;
   }
}
sharpen_AVX.c
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>
#include <immintrin.h>

typedef float FLOAT;

// Cycle Counter Code
//
// Can be replaced with ippGetCpuFreqMhz and ippGetCpuClocks
// when IPP core functions are available.
//
typedef unsigned int UINT32;
typedef unsigned long long int UINT64;
typedef unsigned char UINT8;

/* Timing routine provided by a separate translation unit (Fortran-style
   symbol).  NOTE(review): prototype inferred from the call sites below
   (three double out-parameters) -- confirm against its definition. */
extern void ctimer_(double *elapsed, double *ucpu, double *scpu);

// PPM Edge Enhancement Code
//
// Image is 320 x 240 = 76800 pixels, stored as one planar array per channel.
UINT8 header[22];
UINT8 R[76800];
UINT8 G[76800];
UINT8 B[76800];
FLOAT fR[76800];
FLOAT fG[76800];
FLOAT fB[76800];
UINT8 convR[76800];
UINT8 convG[76800];
UINT8 convB[76800];

#define K 4.0

/* Reference 3x3 sharpen kernel (documentation; the AVX path below
   implements the same weights via kv/ck). */
FLOAT PSF[9] = {-K/8.0, -K/8.0, -K/8.0,
                -K/8.0,  K+1.0, -K/8.0,
                -K/8.0, -K/8.0, -K/8.0};

/* Convolve one colour plane with the 3x3 sharpen kernel, 8 pixels at a time.
 *
 * src : float copy of the plane (320 x 240, row-major)
 * dst : byte plane receiving the clamped result
 * kv  : broadcast of the off-centre weight (-K/8)
 * ck  : broadcast of (K/8) + (K+1), which compensates the centre pixel
 *       already counted once in the middle-row sum, so its net weight is K+1
 * lo/hi : 0.0 / 255.0 clamp bounds
 *
 * First/last row and columns >= 313 are left untouched (no full 3x3
 * neighbourhood / loop bound), matching the original implementation.
 *
 * Loads/stores use the unaligned intrinsics: j starts at 1, so the
 * addresses are not 32-byte aligned and dereferencing a casted __m256*
 * (as the original code did) is undefined behaviour. */
static void sharpen_channel(const FLOAT *src, UINT8 *dst,
                            __m256 kv, __m256 ck, __m256 lo, __m256 hi)
{
    FLOAT out[8];
    for (int i = 1; i < 239; i++) {
        for (int j = 1; j < 312; j += 8) {
            const FLOAT *top = &src[(i - 1) * 320 + j];
            const FLOAT *mid = &src[i * 320 + j];
            const FLOAT *bot = &src[(i + 1) * 320 + j];

            /* sum of the three pixels in each kernel row */
            __m256 sumT = _mm256_add_ps(_mm256_loadu_ps(top - 1),
                          _mm256_add_ps(_mm256_loadu_ps(top),
                                        _mm256_loadu_ps(top + 1)));
            __m256 center = _mm256_loadu_ps(mid);
            __m256 sumM = _mm256_add_ps(_mm256_loadu_ps(mid - 1),
                          _mm256_add_ps(center,
                                        _mm256_loadu_ps(mid + 1)));
            __m256 sumB = _mm256_add_ps(_mm256_loadu_ps(bot - 1),
                          _mm256_add_ps(_mm256_loadu_ps(bot),
                                        _mm256_loadu_ps(bot + 1)));

            /* result = (sumT+sumM+sumB) * (-K/8) + center * ((K/8)+(K+1)) */
            __m256 acc = _mm256_add_ps(sumB, _mm256_add_ps(sumT, sumM));
            acc = _mm256_mul_ps(acc, kv);
            acc = _mm256_add_ps(acc, _mm256_mul_ps(center, ck));
            acc = _mm256_max_ps(lo, _mm256_min_ps(hi, acc));

            _mm256_storeu_ps(out, acc);
            for (int k = 0; k < 8; k++)
                dst[i * 320 + j + k] = (UINT8)out[k];
        }
    }
}

/* Read a PPM named on the command line, sharpen it 499 times (timing
 * loop), and write the result to "sharpen.ppm". */
int main(int argc, char *argv[])
{
    int fdin, fdout, bytesRead = 0, bytesLeft, i, vuelta;
    double elapsed0, ucpu0, scpu0;
    double elapsed1, ucpu1, scpu1;

    if (argc < 2) {
        printf("Usage: sharpen file.ppm\n");
        exit(-1);
    }
    else {
        if ((fdin = open(argv[1], O_RDONLY, 0644)) < 0) {
            printf("Error opening %s\n", argv[1]);
        }
        if ((fdout = open("sharpen.ppm", (O_RDWR | O_CREAT), 0666)) < 0) {
            printf("Error opening %s\n", argv[1]);
        }
    }

    /* Read the 21-byte PPM header, advancing past data already read
       (the original restarted at header[0] on every short read) and
       bailing out on EOF/error instead of looping forever. */
    bytesLeft = 21;
    do {
        bytesRead = read(fdin, (void *)&header[21 - bytesLeft], bytesLeft);
        if (bytesRead <= 0)
            break;
        bytesLeft -= bytesRead;
    } while (bytesLeft > 0);
    header[21] = '\0';

    /* Read interleaved RGB bytes into planar arrays; conv* start as a
       copy so border pixels the convolution skips pass through. */
    for (i = 0; i < 76800; i++) {
        read(fdin, (void *)&R[i], 1);
        convR[i] = R[i];
        read(fdin, (void *)&G[i], 1);
        convG[i] = G[i];
        read(fdin, (void *)&B[i], 1);
        convB[i] = B[i];
    }

    FLOAT kf = ((-K/8.0));
    FLOAT compes_value = (K/8.0) + (K+1.0);
    __m256 kv = _mm256_set1_ps(kf);
    __m256 ck = _mm256_set1_ps(compes_value);
    __m256 _max_mask = _mm256_set1_ps(255.0);
    __m256 _min_mask = _mm256_set1_ps(0.0);

    // Start of convolution time stamp
    ctimer_(&elapsed0, &ucpu0, &scpu0);

    for (vuelta = 1; vuelta < 500; vuelta++) {
        /* refresh the float working copies (kept inside the timing
           loop, as in the original, so timings stay comparable) */
        for (i = 0; i < 76800; i++) {
            fR[i] = (FLOAT)R[i];
            fG[i] = (FLOAT)G[i];
            fB[i] = (FLOAT)B[i];
        }
        sharpen_channel(fR, convR, kv, ck, _min_mask, _max_mask);
        sharpen_channel(fG, convG, kv, ck, _min_mask, _max_mask);
        sharpen_channel(fB, convB, kv, ck, _min_mask, _max_mask);
    }

    // End of convolution time stamp
    ctimer_(&elapsed1, &ucpu1, &scpu1);
    printf("Tiempo: %fs (real) %fs (cpu) %fs (sys)\n",
           elapsed1 - elapsed0, ucpu1 - ucpu0, scpu1 - scpu0);

    write(fdout, (void *)header, 21);

    // Write RGB data
    for (i = 0; i < 76800; i++) {
        write(fdout, (void *)&convR[i], 1);
        write(fdout, (void *)&convG[i], 1);
        write(fdout, (void *)&convB[i], 1);
    }
    close(fdin);
    close(fdout);
    return 0;
}
task1_omp.c
#include <math.h> #include <string.h> #include "timer.h" #include <stdio.h> #define NN 1024 #define NM 1024 float A[NN][NM]; float Anew[NN][NM]; int main(int argc, char** argv) { int i,j; const int n = NN; const int m = NM; const int iter_max = 1000; const double tol = 1.0e-6; double error = 1.0; memset(A, 0, n * m * sizeof(float)); memset(Anew, 0, n * m * sizeof(float)); for (j = 0; j < n; j++) { A[j][0] = 1.0; Anew[j][0] = 1.0; } printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m); StartTimer(); int iter = 0; while ( error > tol && iter < iter_max ) { error = 0.0; #pragma omp parallel for reduction(max:error) for( j = 1; j < n-1; j++) { for( i = 1; i < m-1; i++ ) { Anew[j][i] = 0.25 * ( A[j][i+1] + A[j][i-1] + A[j-1][i] + A[j+1][i]); error = fmax( error, fabs(Anew[j][i] - A[j][i])); } } #pragma omp parallel for for( j = 1; j < n-1; j++) { for( i = 1; i < m-1; i++ ) { A[j][i] = Anew[j][i]; } } if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error); iter++; } double runtime = GetTimer(); printf(" total: %f s\n", runtime / 1000); return 0; }
func.c
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>   /* omp_get_num_threads -- was missing, causing an implicit declaration */
#else
/* Serial fallback so this file also builds without -fopenmp. */
static int omp_get_num_threads(void) { return 1; }
#endif

/*
 * Sequential Jacobi sweep for -laplace(u) = f on an N x N grid with
 * spacing delta.  Runs exactly max_iter sweeps (threshold is accepted
 * for interface parity but unused).  u and u_old are swapped internally
 * each sweep, so the newest iterate ends up in the caller's u_old array
 * after an odd number of sweeps and in u after an even number.
 * Returns the number of threads used (always 1).
 */
int jac_seq(int N, double delta, double threshold, int max_iter,
            double *f, double *u, double *u_old)
{
    int i, j, k = 0;
    double *temp, d = 10e+10;

    while (k < max_iter) {
        /* swap: previous iterate becomes u_old */
        temp = u;
        u = u_old;
        u_old = temp;

        d = 0.0;
        for (i = 1; i < N-1; i++) {
            for (j = 1; j < N-1; j++) {
                u[i*N + j] = 0.25 * (u_old[(i-1)*N + j] + u_old[(i+1)*N + j]
                                   + u_old[i*N + (j-1)] + u_old[i*N + (j+1)]
                                   + delta*delta*f[i*N + j]);
                /* squared update distance (computed but not used as a
                   stopping criterion in this fixed-iteration variant) */
                d += (u[i*N + j] - u_old[i*N + j]) * (u[i*N + j] - u_old[i*N + j]);
            }
        }
        k++;
    }
    // return no. threads
    return (1);
}

/*
 * Sequential Jacobi with convergence test: stops when the squared
 * update distance d drops to threshold^2 or after max_iter sweeps.
 * Returns the number of iterations performed.
 */
int jac_seq_con(int N, double delta, double threshold, int max_iter,
                double *f, double *u, double *u_old)
{
    int i, j, k = 0;
    double *temp, d = 10e+10;

    while (d > threshold*threshold && k < max_iter) {
        temp = u;
        u = u_old;
        u_old = temp;

        d = 0.0;
        for (i = 1; i < N-1; i++) {
            for (j = 1; j < N-1; j++) {
                u[i*N + j] = 0.25 * (u_old[(i-1)*N + j] + u_old[(i+1)*N + j]
                                   + u_old[i*N + (j-1)] + u_old[i*N + (j+1)]
                                   + delta*delta*f[i*N + j]);
                d += (u[i*N + j] - u_old[i*N + j]) * (u[i*N + j] - u_old[i*N + j]);
            }
        }
        k++;
    }
    // return no. iterations
    return (k);
}

/*
 * OpenMP Jacobi, fixed iteration count: a fresh parallel region per
 * sweep, with the update distance accumulated via reduction(+:d).
 * Returns the number of OpenMP threads.
 */
int jac_mp(int N, double delta, double threshold, int max_iter,
           double *f, double *u, double *u_old)
{
    int i, j, threads, k = 0;
    double *temp, d = 10e+10;

    // get threads
    #pragma omp parallel
    {
        #pragma omp single
        {
            threads = omp_get_num_threads();
        }
    }

    while (k < max_iter) {
        temp = u;
        u = u_old;
        u_old = temp;

        d = 0.0;

        #pragma omp parallel shared(f, u, u_old, N,threads,d) private(i, j)
        {
            #pragma omp for reduction(+ : d)
            for (i = 1; i < N-1; i++) {
                for (j = 1; j < N-1; j++) {
                    u[i*N + j] = 0.25 * (u_old[(i-1)*N + j] + u_old[(i+1)*N + j]
                                       + u_old[i*N + (j-1)] + u_old[i*N + (j+1)]
                                       + delta*delta*f[i*N + j]);
                    d += (u[i*N + j] - u_old[i*N + j]) * (u[i*N + j] - u_old[i*N + j]);
                }
            }
        } /* end of parallel region */
        k++;
    }
    // return no. threads
    return (threads);
}

/*
 * OpenMP Jacobi, single persistent parallel region: the while loop runs
 * inside the region with a firstprivate iteration counter k; the swap
 * and reset happen in an omp single (whose implicit barrier, together
 * with the omp for barrier, keeps the sweeps ordered).
 * Returns the number of OpenMP threads.
 */
int jac_mp_v2(int N, double delta, double threshold, int max_iter,
              double *f, double *u, double *u_old)
{
    int i, j, threads, k = 0;
    double *temp, d = 10e+10;

    // get threads
    #pragma omp parallel
    {
        #pragma omp single
        {
            threads = omp_get_num_threads();
        }
    }

    // do calculations
    #pragma omp parallel shared(f, u, u_old, N,threads,d) private(i, j) firstprivate(k)
    {
        while (k < max_iter) {
            #pragma omp single
            {
                // Set u_old = u
                temp = u;
                u = u_old;
                u_old = temp;
                // Set distance = 0.0
                d = 0.0;
            }

            #pragma omp for reduction(+ : d)
            for (i = 1; i < N-1; i++) {
                for (j = 1; j < N-1; j++) {
                    u[i*N + j] = 0.25 * (u_old[(i-1)*N + j] + u_old[(i+1)*N + j]
                                       + u_old[i*N + (j-1)] + u_old[i*N + (j+1)]
                                       + delta*delta*f[i*N + j]);
                    d += (u[i*N + j] - u_old[i*N + j]) * (u[i*N + j] - u_old[i*N + j]);
                }
            }
            k++;
        } /* end while */
    } /* end of parallel region */
    return (threads);
}

/* jac_mp_v3 was a byte-for-byte copy of jac_mp_v2; delegate to it so the
 * algorithm is maintained in one place.  Interface and behaviour unchanged. */
int jac_mp_v3(int N, double delta, double threshold, int max_iter,
              double *f, double *u, double *u_old)
{
    return jac_mp_v2(N, delta, threshold, max_iter, f, u, u_old);
}

/* jac_mp_v23 was also identical to jac_mp_v2; see above. */
int jac_mp_v23(int N, double delta, double threshold, int max_iter,
               double *f, double *u, double *u_old)
{
    return jac_mp_v2(N, delta, threshold, max_iter, f, u, u_old);
}

/*
 * Sequential Gauss-Seidel sweep (in-place update of u), fixed iteration
 * count; threshold is unused in this variant.  Returns 1.
 */
int gauss(int N, double delta, double threshold, int max_iter,
          double *f, double *u)
{
    int k = 0;
    double u_ij, d = 10e+10;

    while (k < max_iter) {
        d = 0.0;
        for (int i = 1; i < N-1; i++) {
            for (int j = 1; j < N-1; j++) {
                u_ij = u[i*N + j];
                u[i*N + j] = 0.25 * (u[(i-1)*N + j] + u[(i+1)*N + j]
                                   + u[i*N + (j-1)] + u[i*N + (j+1)]
                                   + delta*delta*f[i*N + j]);
                d += (u[i*N + j] - u_ij) * (u[i*N + j] - u_ij);
            }
        }
        k++;
    }
    return (1);
}

/*
 * Gauss-Seidel with convergence test; stops when the squared update
 * distance reaches threshold^2 or after max_iter sweeps.
 * Returns the number of iterations performed.
 */
int gauss_con(int N, double delta, double threshold, int max_iter,
              double *f, double *u)
{
    int k = 0;
    double u_ij, d = 10e+10;

    while (d > threshold*threshold && k < max_iter) {
        d = 0.0;
        for (int i = 1; i < N-1; i++) {
            for (int j = 1; j < N-1; j++) {
                u_ij = u[i*N + j];
                u[i*N + j] = 0.25 * (u[(i-1)*N + j] + u[(i+1)*N + j]
                                   + u[i*N + (j-1)] + u[i*N + (j+1)]
                                   + delta*delta*f[i*N + j]);
                d += (u[i*N + j] - u_ij) * (u[i*N + j] - u_ij);
            }
        }
        k++;
    }
    return (k);
}

/*
 * Mandelbrot escape-time image: window [-2.25, 1.25] x [-1.75, 1.75],
 * iteration counts stored in array[i*disp_height + j]; points that
 * survive max_iter iterations are reset to 0.  Rows are distributed
 * across OpenMP threads.  Returns the number of threads.
 */
int mandel(int disp_width, int disp_height, int *array, int max_iter)
{
    double x, y, u, v, u2, v2, scale_real, scale_imag;
    int i, j, iter, threads;

    scale_real = 3.5 / (double)disp_width;
    scale_imag = 3.5 / (double)disp_height;

    // get threads
    #pragma omp parallel
    {
        #pragma omp single
        {
            threads = omp_get_num_threads();
        }
    }

    #pragma omp parallel shared(array,disp_width,disp_height,scale_real,scale_imag,max_iter) private(x, y, u, v, u2, v2, i, j, iter)
    {
        #pragma omp for
        for (i = 0; i < disp_width; i++) {
            x = ((double)i * scale_real) - 2.25;
            for (j = 0; j < disp_height; j++) {
                y = ((double)j * scale_imag) - 1.75;
                u = 0.0;
                v = 0.0;
                u2 = 0.0;
                v2 = 0.0;
                iter = 0;
                while (u2 + v2 < 4.0 && iter < max_iter) {
                    v = 2 * v * u + y;
                    u = u2 - v2 + x;
                    u2 = u*u;
                    v2 = v*v;
                    iter = iter + 1;
                }
                // if we exceed max_iter, reset to zero
                iter = iter == max_iter ? 0 : iter;
                array[i*disp_height + j] = iter;
            }
        }
    }
    return (threads);
}

/*
 * Write the solution U as "x y u" triples (cell centres on [-1,1]^2)
 * to the given file, one line per grid point.
 */
void write_result(double *U, int N, double delta, char filename[20])
{
    double u, y, x;
    FILE *matrix = fopen(filename, "w");

    for (int i = 0; i < N; i++) {
        x = -1.0 + i * delta + delta * 0.5;
        for (int j = 0; j < N; j++) {
            y = -1.0 + j * delta + delta * 0.5;
            u = U[i*N + j];
            fprintf(matrix, "%g\t%g\t%g\n", x, y, u);
        }
    }
    fclose(matrix);
}
conv3x3s1_winograd64_pack4_neon_BdB.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "option.h"
#include "mat.h"

namespace ncnn{

// Winograd F(6x6, 3x3) input transform ("B^T d B") for 3x3 stride-1
// convolution, pack-4 NEON layout: every 8x8 input tile of each channel is
// transformed into 64 coefficients, written channel-by-channel into
// bottom_blob_tm with the 64 transform positions strided by `tiles`.
// The transform is applied separably: first along rows into tmp[][][],
// then along columns.  Channels are processed in parallel with OpenMP.
// NOTE(review): assumes outw/outh are multiples of 6 and bottom_blob is
// already padded so every 8x8 tile (6-pixel step, 2-pixel overlap) is in
// bounds -- confirm against the caller that does the bordering.
static void conv3x3s1_winograd64_pack4_neon_BdB(const Mat& bottom_blob, Mat& top_blob, const Option& opt, int inch, int outh, int outw)
{
    int w = bottom_blob.w;
    //int h = bottom_blob.h;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    Mat bottom_blob_bordered = bottom_blob;

    // BEGIN transform input
    Mat bottom_blob_tm = top_blob;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;

        const int tiles = w_tm/8 * h_tm/8;

        bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);

        // The input-transform matrix B^T (kept for reference):
        // const float itm[8][8] = {
        //     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
        //
        //     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
        //     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
        //
        //     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
        //     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
        //
        //     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
        //     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
        //
        //     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
        // };

        // Factored form actually computed (shares subexpressions):
        // 0 = r00 - r06 + (r04 - r02) * 5.25
        // 7 = r07 - r01 + (r03 - r05) * 5.25

        // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
        // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

        // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
        // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

        // reuse r04 * 1.25
        // reuse r03 * 2.5
        // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
        // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q<inch; q++)
        {
            const Mat img0 = bottom_blob_bordered.channel(q);
            Mat img0_tm = bottom_blob_tm.channel(q);

            // row-transformed tile: [transform row][input row][4 packed lanes]
            float tmp[8][8][4];

            // tile
            for (int i=0; i<h_tm/8; i++)
            {
                for (int j=0; j<w_tm/8; j++)
                {
                    // top-left of this 8x8 tile (tiles step by 6, overlapping by 2)
                    const float* r0 = img0.row(i * 6) + (j * 6) * 4;

                    // pass 1: transform each of the 8 rows into tmp
                    for (int m=0; m<8; m++)
                    {
                        float32x4_t _r00 = vld1q_f32(r0);
                        float32x4_t _r01 = vld1q_f32(r0 + 4);
                        float32x4_t _r02 = vld1q_f32(r0 + 8);
                        float32x4_t _r03 = vld1q_f32(r0 + 12);
                        float32x4_t _r04 = vld1q_f32(r0 + 16);
                        float32x4_t _r05 = vld1q_f32(r0 + 20);
                        float32x4_t _r06 = vld1q_f32(r0 + 24);
                        float32x4_t _r07 = vld1q_f32(r0 + 28);

                        float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f);
                        float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f);
                        vst1q_f32(tmp[0][m], _tmp0m);
                        vst1q_f32(tmp[7][m], _tmp7m);
                        // tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25;
                        // tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25;

                        float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f);
                        float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f);
                        // float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25);
                        // float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25);

                        float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b);
                        float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b);
                        vst1q_f32(tmp[1][m], _tmp1m);
                        vst1q_f32(tmp[2][m], _tmp2m);
                        // tmp[1][m] = tmp12a + tmp12b;
                        // tmp[2][m] = tmp12a - tmp12b;

                        float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f);
                        float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);
                        // float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25);
                        // float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2);

                        float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b);
                        float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b);
                        vst1q_f32(tmp[3][m], _tmp3m);
                        vst1q_f32(tmp[4][m], _tmp4m);
                        // tmp[3][m] = tmp34a + tmp34b;
                        // tmp[4][m] = tmp34a - tmp34b;

                        float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f);
                        float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);
                        // float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4);
                        // float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5);

                        float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b);
                        float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b);
                        vst1q_f32(tmp[5][m], _tmp5m);
                        vst1q_f32(tmp[6][m], _tmp6m);
                        // tmp[5][m] = tmp56a + tmp56b;
                        // tmp[6][m] = tmp56a - tmp56b;

                        r0 += w * 4;   // next input row of the tile
                    }

                    // output pointers: the 64 transform positions are laid out
                    // as 64 planes of `tiles` pack-4 elements each
                    float* r0_tm_0 = (float*)img0_tm + (i * w_tm/8 + j) * 4;
                    float* r0_tm_1 = r0_tm_0 + tiles * 4;
                    float* r0_tm_2 = r0_tm_0 + tiles * 8;
                    float* r0_tm_3 = r0_tm_0 + tiles * 12;
                    float* r0_tm_4 = r0_tm_0 + tiles * 16;
                    float* r0_tm_5 = r0_tm_0 + tiles * 20;
                    float* r0_tm_6 = r0_tm_0 + tiles * 24;
                    float* r0_tm_7 = r0_tm_0 + tiles * 28;

                    // pass 2: transform the 8 columns of tmp and scatter to output
                    for (int m=0; m<8; m++)
                    {
                        float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                        float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                        float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                        float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                        float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                        float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                        float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
                        float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);

                        float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f);
                        float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f);
                        // r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
                        // r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;

                        float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f);
                        float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f);
                        // float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
                        // float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25);

                        float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b);
                        float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b);
                        // r0_tm[1] = tmp12a + tmp12b;
                        // r0_tm[2] = tmp12a - tmp12b;

                        float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
                        float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);
                        // float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
                        // float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);

                        float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b);
                        float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b);
                        // r0_tm[3] = tmp34a + tmp34b;
                        // r0_tm[4] = tmp34a - tmp34b;

                        float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f);
                        float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);
                        // float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
                        // float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);

                        float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b);
                        float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b);
                        // r0_tm[5] = tmp56a + tmp56b;
                        // r0_tm[6] = tmp56a - tmp56b;

                        vst1q_f32(r0_tm_0, _r0tm0);
                        vst1q_f32(r0_tm_1, _r0tm1);
                        vst1q_f32(r0_tm_2, _r0tm2);
                        vst1q_f32(r0_tm_3, _r0tm3);
                        vst1q_f32(r0_tm_4, _r0tm4);
                        vst1q_f32(r0_tm_5, _r0tm5);
                        vst1q_f32(r0_tm_6, _r0tm6);
                        vst1q_f32(r0_tm_7, _r0tm7);

                        // advance 8 transform planes (8 * tiles * 4 floats)
                        r0_tm_0 += tiles * 32;
                        r0_tm_1 += tiles * 32;
                        r0_tm_2 += tiles * 32;
                        r0_tm_3 += tiles * 32;
                        r0_tm_4 += tiles * 32;
                        r0_tm_5 += tiles * 32;
                        r0_tm_6 += tiles * 32;
                        r0_tm_7 += tiles * 32;
                    }
                }
            }
        }
    }
}
GB_unaryop__abs_int32_int8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_int32_int8
// op(A') function:  GB_tran__abs_int32_int8

// C type:   int32_t
// A type:   int8_t
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = GB_IABS (aij)

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
#define GB_CASTING(z, aij) \
    int32_t z = (int32_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise: Cx [p] = |(int32_t) Ax [p]| for all anz entries, statically
// scheduled across nthreads OpenMP threads (safe even when Cx aliases Ax,
// since entry p reads and writes only position p).
GrB_Info GB_unop__abs_int32_int8
(
    int32_t *Cx,        // Cx and Ax may be aliased
    int8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the shared template GB_unaryop_transpose.c, which
// expands the GB_* macros defined above for this type/operator combination.
GrB_Info GB_tran__abs_int32_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
decl.c
/* Process declarations and variables for -*- C++ -*- compiler. Copyright (C) 1988-2020 Free Software Foundation, Inc. Contributed by Michael Tiemann (tiemann@cygnus.com) This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ /* Process declarations and symbol lookup for C++ front end. Also constructs types; the standard scalar types at initialization, and structure, union, array and enum types when they are declared. */ /* ??? not all decl nodes are given the most useful possible line numbers. For example, the CONST_DECLs for enum values. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "target.h" #include "c-family/c-target.h" #include "cp-tree.h" #include "timevar.h" #include "stringpool.h" #include "cgraph.h" #include "stor-layout.h" #include "varasm.h" #include "attribs.h" #include "flags.h" #include "tree-iterator.h" #include "decl.h" #include "intl.h" #include "toplev.h" #include "c-family/c-objc.h" #include "c-family/c-pragma.h" #include "c-family/c-ubsan.h" #include "debug.h" #include "plugin.h" #include "builtins.h" #include "gimplify.h" #include "asan.h" #include "gcc-rich-location.h" #include "langhooks.h" #include "omp-general.h" /* Possible cases of bad specifiers type used by bad_specifiers. 
*/ enum bad_spec_place { BSP_VAR, /* variable */ BSP_PARM, /* parameter */ BSP_TYPE, /* type */ BSP_FIELD /* field */ }; static const char *redeclaration_error_message (tree, tree); static int decl_jump_unsafe (tree); static void require_complete_types_for_parms (tree); static tree grok_reference_init (tree, tree, tree, int); static tree grokvardecl (tree, tree, tree, const cp_decl_specifier_seq *, int, int, int, bool, int, tree, location_t); static void check_static_variable_definition (tree, tree); static void record_unknown_type (tree, const char *); static int member_function_or_else (tree, tree, enum overload_flags); static tree local_variable_p_walkfn (tree *, int *, void *); static const char *tag_name (enum tag_types); static tree lookup_and_check_tag (enum tag_types, tree, TAG_how, bool); static void maybe_deduce_size_from_array_init (tree, tree); static void layout_var_decl (tree); static tree check_initializer (tree, tree, int, vec<tree, va_gc> **); static void make_rtl_for_nonlocal_decl (tree, tree, const char *); static void copy_type_enum (tree , tree); static void check_function_type (tree, tree); static void finish_constructor_body (void); static void begin_destructor_body (void); static void finish_destructor_body (void); static void record_key_method_defined (tree); static tree create_array_type_for_decl (tree, tree, tree, location_t); static tree get_atexit_node (void); static tree get_dso_handle_node (void); static tree start_cleanup_fn (void); static void end_cleanup_fn (void); static tree cp_make_fname_decl (location_t, tree, int); static void initialize_predefined_identifiers (void); static tree check_special_function_return_type (special_function_kind, tree, tree, int, const location_t*); static tree push_cp_library_fn (enum tree_code, tree, int); static tree build_cp_library_fn (tree, enum tree_code, tree, int); static void store_parm_decls (tree); static void initialize_local_var (tree, tree); static void expand_static_init (tree, tree); 
static location_t smallest_type_location (const cp_decl_specifier_seq*);

/* The following symbols are subsumed in the cp_global_trees array, and
   listed here individually for documentation purposes.

   C++ extensions
	tree wchar_decl_node;

	tree vtable_entry_type;
	tree delta_type_node;
	tree __t_desc_type_node;

	tree class_type_node;
	tree unknown_type_node;

   Array type `vtable_entry_type[]'

	tree vtbl_type_node;
	tree vtbl_ptr_type_node;

   Namespaces,

	tree std_node;
	tree abi_node;

   A FUNCTION_DECL which can call `abort'.  Not necessarily the
   one that the user will declare, but sufficient to be called
   by routines that want to abort the program.

	tree abort_fndecl;

   Used by RTTI
	tree type_info_type_node, tinfo_decl_id, tinfo_decl_type;
	tree tinfo_var_id;  */

tree cp_global_trees[CPTI_MAX];

/* A list of objects which have constructors or destructors which
   reside in the global scope.  The decl is stored in the TREE_VALUE
   slot and the initializer is stored in the TREE_PURPOSE slot.  */
tree static_aggregates;

/* Like static_aggregates, but for thread_local variables.  */
tree tls_aggregates;

/* -- end of C++ */

/* A node for the integer constant 2.  */
tree integer_two_node;

/* vector of static decls.  */
vec<tree, va_gc> *static_decls;

/* vector of keyed classes.  */
vec<tree, va_gc> *keyed_classes;

/* Used only for jumps to as-yet undefined labels, since jumps to
   defined labels can have their validity checked immediately.
   One entry is created per `goto LABEL' seen before LABEL is defined.  */

struct GTY((chain_next ("%h.next"))) named_label_use_entry {
  struct named_label_use_entry *next;
  /* The binding level to which this entry is *currently* attached.
     This is initially the binding level in which the goto appeared,
     but is modified as scopes are closed.  */
  cp_binding_level *binding_level;
  /* The head of the names list that was current when the goto appeared,
     or the inner scope popped.  These are the decls that will *not* be
     skipped when jumping to the label.  */
  tree names_in_scope;
  /* The location of the goto, for error reporting.  */
  location_t o_goto_locus;
  /* True if an OpenMP structured block scope has been closed since
     the goto appeared.  This means that the branch from the label will
     illegally exit an OpenMP scope.  */
  bool in_omp_scope;
};

/* A list of all LABEL_DECLs in the function that have names.  Here so
   we can clear out their names' definitions at the end of the
   function, and so we can check the validity of jumps to these labels.  */

struct GTY((for_user)) named_label_entry {
  tree name;  /* Name of decl. */
  tree label_decl; /* LABEL_DECL, unless deleted local label. */

  named_label_entry *outer; /* Outer shadowed chain.  */

  /* The binding level to which the label is *currently* attached.
     This is initially set to the binding level in which the label
     is defined, but is modified as scopes are closed.  */
  cp_binding_level *binding_level;

  /* The head of the names list that was current when the label was
     defined, or the inner scope popped.  These are the decls that will
     be skipped when jumping to the label.  */
  tree names_in_scope;

  /* A vector of all decls from all binding levels that would be
     crossed by a backward branch to the label.  */
  vec<tree, va_gc> *bad_decls;

  /* A list of uses of the label, before the label is defined.  */
  named_label_use_entry *uses;

  /* The following bits are set after the label is defined, and are
     updated as scopes are popped.  They indicate that a jump to the
     label will illegally enter a scope of the given flavor.  */
  bool in_try_scope;
  bool in_catch_scope;
  bool in_omp_scope;
  bool in_transaction_scope;
  bool in_constexpr_if;
};

/* NAMED_LABELS expands to the hash table of named labels stored on the
   current function's language-specific data (cp_function_chain).  */
#define named_labels cp_function_chain->x_named_labels

/* The number of function bodies which we are currently processing.
   (Zero if we are at namespace scope, one inside the body of a
   function, two inside the body of a function in a local class, etc.)  */
int function_depth;

/* Whether the exception-specifier is part of a function type (i.e. C++17).
*/
bool flag_noexcept_type;

/* States indicating how grokdeclarator() should handle declspecs marked
   with __attribute__((deprecated)).  An object declared as
   __attribute__((deprecated)) suppresses warnings of uses of other
   deprecated items.  */
enum deprecated_states deprecated_state = DEPRECATED_NORMAL;

/* A list of VAR_DECLs whose type was incomplete at the time the
   variable was declared.  */

struct GTY(()) incomplete_var {
  tree decl;             /* The variable whose type was incomplete.  */
  tree incomplete_type;  /* Its (then-incomplete) type.  */
};

static GTY(()) vec<incomplete_var, va_gc> *incomplete_vars;

/* Returns the kind of template specialization we are currently
   processing, given that its declaration contained N_CLASS_SCOPES
   explicit scope qualifications.  Walks the chain of template
   parameter scopes starting at current_binding_level and compares
   their count against N_CLASS_SCOPES.  */

tmpl_spec_kind
current_tmpl_spec_kind (int n_class_scopes)
{
  int n_template_parm_scopes = 0;
  int seen_specialization_p = 0;
  int innermost_specialization_p = 0;
  cp_binding_level *b;

  /* Scan through the template parameter scopes.  */
  for (b = current_binding_level;
       b->kind == sk_template_parms;
       b = b->level_chain)
    {
      /* If we see a specialization scope inside a parameter scope,
	 then something is wrong.  That corresponds to a declaration
	 like:

	    template <class T> template <> ...

	 which is always invalid since [temp.expl.spec] forbids the
	 specialization of a class member template if the enclosing
	 class templates are not explicitly specialized as well.  */
      if (b->explicit_spec_p)
	{
	  if (n_template_parm_scopes == 0)
	    innermost_specialization_p = 1;
	  else
	    seen_specialization_p = 1;
	}
      else if (seen_specialization_p == 1)
	return tsk_invalid_member_spec;

      ++n_template_parm_scopes;
    }

  /* Handle explicit instantiations.  */
  if (processing_explicit_instantiation)
    {
      if (n_template_parm_scopes != 0)
	/* We've seen a template parameter list during an explicit
	   instantiation.  For example:

	     template <class T> template void f(int);

	   This is erroneous.  */
	return tsk_invalid_expl_inst;
      else
	return tsk_expl_inst;
    }

  if (n_template_parm_scopes < n_class_scopes)
    /* We've not seen enough template headers to match all the
       specialized classes present.  For example:

	 template <class T> void R<T>::S<T>::f(int);

       This is invalid; there needs to be one set of template
       parameters for each class.  */
    return tsk_insufficient_parms;
  else if (n_template_parm_scopes == n_class_scopes)
    /* We're processing a non-template declaration (even though it may
       be a member of a template class.)  For example:

	 template <class T> void S<T>::f(int);

       The `class T' matches the `S<T>', leaving no template headers
       corresponding to the `f'.  */
    return tsk_none;
  else if (n_template_parm_scopes > n_class_scopes + 1)
    /* We've got too many template headers.  For example:

	 template <> template <class T> void f (T);

       There need to be more enclosing classes.  */
    return tsk_excessive_parms;
  else
    /* This must be a template.  It's of the form:

	 template <class T> template <class U> void S<T>::f(U);

       This is a specialization if the innermost level was a
       specialization; otherwise it's just a definition of the
       template.  */
    return innermost_specialization_p ? tsk_expl_spec : tsk_template;
}

/* Exit the current scope.  */

void
finish_scope (void)
{
  poplevel (0, 0, 0);
}

/* When a label goes out of scope, check to see if that label was used
   in a valid manner, and issue any appropriate warnings or errors.  */

static void
check_label_used (tree label)
{
  if (!processing_template_decl)
    {
      if (DECL_INITIAL (label) == NULL_TREE)
	{
	  location_t location;

	  error ("label %q+D used but not defined", label);
	  location = input_location;
	  /* FIXME want (LOCATION_FILE (input_location), (line)0) */
	  /* Avoid crashing later.  */
	  define_label (location, DECL_NAME (label));
	}
      else
	warn_for_unused_label (label);
    }
}

/* Helper function to sort named label entries in a vector by DECL_UID.
*/

static int
sort_labels (const void *a, const void *b)
{
  tree label1 = *(tree const *) a;
  tree label2 = *(tree const *) b;

  /* DECL_UIDs can never be equal.  */
  return DECL_UID (label1) > DECL_UID (label2) ? -1 : +1;
}

/* At the end of a function, all labels declared within the function
   go out of scope.  BLOCK is the top-level block for the function.
   Chains each surviving LABEL_DECL onto BLOCK_VARS of BLOCK and frees
   the per-function named-label hash table.  */

static void
pop_labels (tree block)
{
  if (!named_labels)
    return;

  /* We need to add the labels to the block chain, so debug
     information is emitted.  But, we want the order to be stable so
     need to sort them first.  Otherwise the debug output could be
     randomly ordered.  I guess it's mostly stable, unless the hash
     table implementation changes.  */
  auto_vec<tree, 32> labels (named_labels->elements ());
  hash_table<named_label_hash>::iterator end (named_labels->end ());
  for (hash_table<named_label_hash>::iterator iter
	 (named_labels->begin ()); iter != end; ++iter)
    {
      named_label_entry *ent = *iter;

      gcc_checking_assert (!ent->outer);
      if (ent->label_decl)
	labels.quick_push (ent->label_decl);
      ggc_free (ent);
    }
  named_labels = NULL;
  labels.qsort (sort_labels);

  while (labels.length ())
    {
      tree label = labels.pop ();

      DECL_CHAIN (label) = BLOCK_VARS (block);
      BLOCK_VARS (block) = label;

      check_label_used (label);
    }
}

/* At the end of a block with local labels, restore the outer definition.  */

static void
pop_local_label (tree id, tree label)
{
  check_label_used (label);
  named_label_entry **slot = named_labels->find_slot_with_hash
    (id, IDENTIFIER_HASH_VALUE (id), NO_INSERT);
  named_label_entry *ent = *slot;

  if (ent->outer)
    ent = ent->outer;
  else
    {
      /* No shadowed entry: replace the slot with an empty entry
	 carrying only the name.  */
      ent = ggc_cleared_alloc<named_label_entry> ();
      ent->name = id;
    }
  *slot = ent;
}

/* The following two routines are used to interface to Objective-C++.
   The binding level is purposely treated as an opaque type.  */

void *
objc_get_current_scope (void)
{
  return current_binding_level;
}

/* The following routine is used by the NeXT-style SJLJ exceptions;
   variables get marked 'volatile' so as to not be clobbered by
   _setjmp()/_longjmp() calls.  All variables in the current scope,
   as well as parent scopes up to (but not including) ENCLOSING_BLK
   shall be thusly marked.  */

void
objc_mark_locals_volatile (void *enclosing_blk)
{
  cp_binding_level *scope;

  for (scope = current_binding_level;
       scope && scope != enclosing_blk;
       scope = scope->level_chain)
    {
      tree decl;

      for (decl = scope->names; decl; decl = TREE_CHAIN (decl))
	objc_volatilize_decl (decl);

      /* Do not climb up past the current function.  */
      if (scope->kind == sk_function_parms)
	break;
    }
}

/* True if B is the level for the condition of a constexpr if.  */

static bool
level_for_constexpr_if (cp_binding_level *b)
{
  return (b->kind == sk_cond && b->this_entity
	  && TREE_CODE (b->this_entity) == IF_STMT
	  && IF_STMT_CONSTEXPR_P (b->this_entity));
}

/* Update data for defined and undefined labels when leaving a scope.
   Hash-table traversal callback; always returns 1 to continue the
   traversal.  */

int
poplevel_named_label_1 (named_label_entry **slot, cp_binding_level *bl)
{
  named_label_entry *ent = *slot;
  cp_binding_level *obl = bl->level_chain;

  /* If the label was defined in the scope being popped, re-attach it to
     the enclosing level and record which scope flavors a jump to it
     would now illegally enter.  */
  if (ent->binding_level == bl)
    {
      tree decl;

      /* ENT->NAMES_IN_SCOPE may contain a mixture of DECLs and
	 TREE_LISTs representing OVERLOADs, so be careful.  */
      for (decl = ent->names_in_scope; decl; decl = (DECL_P (decl)
						     ? DECL_CHAIN (decl)
						     : TREE_CHAIN (decl)))
	if (decl_jump_unsafe (decl))
	  vec_safe_push (ent->bad_decls, decl);

      ent->binding_level = obl;
      ent->names_in_scope = obl->names;
      switch (bl->kind)
	{
	case sk_try:
	  ent->in_try_scope = true;
	  break;
	case sk_catch:
	  ent->in_catch_scope = true;
	  break;
	case sk_omp:
	  ent->in_omp_scope = true;
	  break;
	case sk_transaction:
	  ent->in_transaction_scope = true;
	  break;
	case sk_block:
	  if (level_for_constexpr_if (bl->level_chain))
	    ent->in_constexpr_if = true;
	  break;
	default:
	  break;
	}
    }
  else if (ent->uses)
    {
      /* Otherwise re-attach any pending gotos recorded in this scope.  */
      struct named_label_use_entry *use;

      for (use = ent->uses; use ; use = use->next)
	if (use->binding_level == bl)
	  {
	    use->binding_level = obl;
	    use->names_in_scope = obl->names;
	    if (bl->kind == sk_omp)
	      use->in_omp_scope = true;
	  }
    }

  return 1;
}

/* Saved errorcount to avoid -Wunused-but-set-{parameter,variable} warnings
   when errors were reported, except for -Werror-unused-but-set-*.  */
static int unused_but_set_errorcount;

/* Exit a binding level.
   Pop the level off, and restore the state of the identifier-decl mappings
   that were in effect when this level was entered.

   If KEEP == 1, this level had explicit declarations, so
   and create a "block" (a BLOCK node) for the level
   to record its declarations and subblocks for symbol table output.

   If FUNCTIONBODY is nonzero, this level is the body of a function,
   so create a block as if KEEP were set and also clear out all
   label names.

   If REVERSE is nonzero, reverse the order of decls before putting
   them into the BLOCK.

   Returns the BLOCK node made for this level, or NULL_TREE if none
   was made.  */

tree
poplevel (int keep, int reverse, int functionbody)
{
  tree link;
  /* The chain of decls was accumulated in reverse order.
     Put it into forward order, just for cleanliness.  */
  tree decls;
  tree subblocks;
  tree block;
  tree decl;
  scope_kind kind;

  bool subtime = timevar_cond_start (TV_NAME_LOOKUP);
 restart:

  block = NULL_TREE;

  gcc_assert (current_binding_level->kind != sk_class
	      && current_binding_level->kind != sk_namespace);

  if (current_binding_level->kind == sk_cleanup)
    functionbody = 0;
  subblocks = functionbody >= 0 ? current_binding_level->blocks : 0;

  gcc_assert (!vec_safe_length (current_binding_level->class_shadowed));

  /* We used to use KEEP == 2 to indicate that the new block should go
     at the beginning of the list of blocks at this binding level,
     rather than the end.  This hack is no longer used.  */
  gcc_assert (keep == 0 || keep == 1);

  if (current_binding_level->keep)
    keep = 1;

  /* Any uses of undefined labels, and any defined labels, now operate
     under constraints of next binding contour.  */
  if (cfun && !functionbody && named_labels)
    named_labels->traverse<cp_binding_level *, poplevel_named_label_1>
      (current_binding_level);

  /* Get the decls in the order they were written.
     Usually current_binding_level->names is in reverse order.
     But parameter decls were previously put in forward order.  */

  decls = current_binding_level->names;
  if (reverse)
    {
      decls = nreverse (decls);
      current_binding_level->names = decls;
    }

  /* If there were any declarations or structure tags in that level,
     or if this level is a function body,
     create a BLOCK to record them for the life of this function.  */
  block = NULL_TREE;
  /* Avoid function body block if possible.  */
  if (functionbody && subblocks && BLOCK_CHAIN (subblocks) == NULL_TREE)
    keep = 0;
  else if (keep == 1 || functionbody)
    block = make_node (BLOCK);
  if (block != NULL_TREE)
    {
      BLOCK_VARS (block) = decls;
      BLOCK_SUBBLOCKS (block) = subblocks;
    }

  /* In each subblock, record that this is its superior.  */
  if (keep >= 0)
    for (link = subblocks; link; link = BLOCK_CHAIN (link))
      BLOCK_SUPERCONTEXT (link) = block;

  /* Before we remove the declarations first check for unused variables.  */
  if ((warn_unused_variable || warn_unused_but_set_variable)
      && current_binding_level->kind != sk_template_parms
      && !processing_template_decl)
    for (tree d = get_local_decls (); d; d = TREE_CHAIN (d))
      {
	/* There are cases where D itself is a TREE_LIST.  See in
	   push_local_binding where the list of decls returned by
	   getdecls is built.  */
	decl = TREE_CODE (d) == TREE_LIST ? TREE_VALUE (d) : d;

	tree type = TREE_TYPE (decl);
	if (VAR_P (decl)
	    && (! TREE_USED (decl) || !DECL_READ_P (decl))
	    && ! DECL_IN_SYSTEM_HEADER (decl)
	    /* For structured bindings, consider only real variables, not
	       subobjects.  */
	    && (DECL_DECOMPOSITION_P (decl) ? !DECL_DECOMP_BASE (decl)
		: (DECL_NAME (decl) && !DECL_ARTIFICIAL (decl)))
	    && type != error_mark_node
	    && (!CLASS_TYPE_P (type)
		|| !TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type)
		|| lookup_attribute ("warn_unused",
				     TYPE_ATTRIBUTES (TREE_TYPE (decl)))))
	  {
	    if (! TREE_USED (decl))
	      {
		if (!DECL_NAME (decl) && DECL_DECOMPOSITION_P (decl))
		  warning_at (DECL_SOURCE_LOCATION (decl),
			      OPT_Wunused_variable,
			      "unused structured binding declaration");
		else
		  warning_at (DECL_SOURCE_LOCATION (decl),
			      OPT_Wunused_variable,
			      "unused variable %qD", decl);
	      }
	    else if (DECL_CONTEXT (decl) == current_function_decl
		     // For -Wunused-but-set-variable leave references alone.
		     && !TYPE_REF_P (TREE_TYPE (decl))
		     && errorcount == unused_but_set_errorcount)
	      {
		if (!DECL_NAME (decl) && DECL_DECOMPOSITION_P (decl))
		  warning_at (DECL_SOURCE_LOCATION (decl),
			      OPT_Wunused_but_set_variable, "structured "
			      "binding declaration set but not used");
		else
		  warning_at (DECL_SOURCE_LOCATION (decl),
			      OPT_Wunused_but_set_variable,
			      "variable %qD set but not used", decl);
		unused_but_set_errorcount = errorcount;
	      }
	  }
      }

  /* Remove declarations for all the DECLs in this level.  */
  for (link = decls; link; link = TREE_CHAIN (link))
    {
      tree name;
      if (TREE_CODE (link) == TREE_LIST)
	{
	  decl = TREE_VALUE (link);
	  name = TREE_PURPOSE (link);
	  gcc_checking_assert (name);
	}
      else
	{
	  decl = link;
	  name = DECL_NAME (decl);
	}

      /* Remove the binding.  */
      if (TREE_CODE (decl) == LABEL_DECL)
	pop_local_label (name, decl);
      else
	pop_local_binding (name, decl);
    }

  /* Restore the IDENTIFIER_TYPE_VALUEs.  */
  for (link = current_binding_level->type_shadowed;
       link; link = TREE_CHAIN (link))
    SET_IDENTIFIER_TYPE_VALUE (TREE_PURPOSE (link), TREE_VALUE (link));

  /* There may be OVERLOADs (wrapped in TREE_LISTs) on the BLOCK_VARs
     list if a `using' declaration put them there.  The debugging
     back ends won't understand OVERLOAD, so we remove them here.
     Because the BLOCK_VARS are (temporarily) shared with
     CURRENT_BINDING_LEVEL->NAMES we must do this fixup after we have
     popped all the bindings.  Also remove undeduced 'auto' decls,
     which LTO doesn't understand, and can't have been used by anything.  */
  if (block)
    {
      tree* d;

      for (d = &BLOCK_VARS (block); *d; )
	{
	  if (TREE_CODE (*d) == TREE_LIST
	      || (!processing_template_decl
		  && undeduced_auto_decl (*d)))
	    *d = TREE_CHAIN (*d);
	  else
	    d = &DECL_CHAIN (*d);
	}
    }

  /* If the level being exited is the top level of a function,
     check over all the labels.  */
  if (functionbody)
    {
      if (block)
	{
	  /* Since this is the top level block of a function, the vars are
	     the function's parameters.  Don't leave them in the BLOCK
	     because they are found in the FUNCTION_DECL instead.  */
	  BLOCK_VARS (block) = 0;
	  pop_labels (block);
	}
      else
	pop_labels (subblocks);
    }

  kind = current_binding_level->kind;
  if (kind == sk_cleanup)
    {
      tree stmt;

      /* If this is a temporary binding created for a cleanup, then we'll
	 have pushed a statement list level.  Pop that, create a new
	 BIND_EXPR for the block, and insert it into the stream.  */
      stmt = pop_stmt_list (current_binding_level->statement_list);
      stmt = c_build_bind_expr (input_location, block, stmt);
      add_stmt (stmt);
    }

  leave_scope ();
  if (functionbody)
    {
      /* The current function is being defined, so its DECL_INITIAL
	 should be error_mark_node.  */
      gcc_assert (DECL_INITIAL (current_function_decl) == error_mark_node);
      DECL_INITIAL (current_function_decl) = block ? block : subblocks;
      if (subblocks)
	{
	  if (FUNCTION_NEEDS_BODY_BLOCK (current_function_decl))
	    {
	      if (BLOCK_SUBBLOCKS (subblocks))
		BLOCK_OUTER_CURLY_BRACE_P (BLOCK_SUBBLOCKS (subblocks)) = 1;
	    }
	  else
	    BLOCK_OUTER_CURLY_BRACE_P (subblocks) = 1;
	}
    }
  else if (block)
    current_binding_level->blocks
      = block_chainon (current_binding_level->blocks, block);

  /* If we did not make a block for the level just exited,
     any blocks made for inner levels
     (since they cannot be recorded as subblocks in that level)
     must be carried forward so they will later become subblocks
     of something else.  */
  else if (subblocks)
    current_binding_level->blocks
      = block_chainon (current_binding_level->blocks, subblocks);

  /* Each and every BLOCK node created here in `poplevel' is important
     (e.g. for proper debugging information) so if we created one
     earlier, mark it as "used".  */
  if (block)
    TREE_USED (block) = 1;

  /* All temporary bindings created for cleanups are popped silently.  */
  if (kind == sk_cleanup)
    goto restart;

  timevar_cond_stop (TV_NAME_LOOKUP, subtime);
  return block;
}

/* Call wrapup_globals_declarations for the globals in NAMESPACE.  */
/* Diagnose odr-used extern inline variables without definitions
   in the current TU.
*/

int
wrapup_namespace_globals ()
{
  if (vec<tree, va_gc> *statics = static_decls)
    {
      tree decl;
      unsigned int i;
      FOR_EACH_VEC_ELT (*statics, i, decl)
	{
	  /* Warn about static functions that were declared but never
	     defined in this TU.  */
	  if (warn_unused_function
	      && TREE_CODE (decl) == FUNCTION_DECL
	      && DECL_INITIAL (decl) == 0
	      && DECL_EXTERNAL (decl)
	      && !TREE_PUBLIC (decl)
	      && !DECL_ARTIFICIAL (decl)
	      && !DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (decl)
	      && !TREE_NO_WARNING (decl))
	    warning_at (DECL_SOURCE_LOCATION (decl),
			OPT_Wunused_function,
			"%qF declared %<static%> but never defined", decl);

	  if (VAR_P (decl)
	      && DECL_EXTERNAL (decl)
	      && DECL_INLINE_VAR_P (decl)
	      && DECL_ODR_USED (decl))
	    error_at (DECL_SOURCE_LOCATION (decl),
		      "odr-used inline variable %qD is not defined", decl);
	}

      /* Clear out the list, so we don't rescan next time.  */
      static_decls = NULL;

      /* Write out any globals that need to be output.  */
      return wrapup_global_declarations (statics->address (),
					 statics->length ());
    }
  return 0;
}

/* In C++, you don't have to write `struct S' to refer to `S'; you can
   just use `S'.  We accomplish this by creating a TYPE_DECL as if the
   user had written `typedef struct S S'.  Create and return the
   TYPE_DECL for TYPE.  */

tree
create_implicit_typedef (tree name, tree type)
{
  tree decl;

  decl = build_decl (input_location, TYPE_DECL, name, type);
  DECL_ARTIFICIAL (decl) = 1;
  /* There are other implicit type declarations, like the one *within*
     a class that allows you to write `S::S'.  We must distinguish
     amongst these.  */
  SET_DECL_IMPLICIT_TYPEDEF_P (decl);
  TYPE_NAME (type) = decl;
  TYPE_STUB_DECL (type) = decl;

  return decl;
}

/* Function-scope local entities that need discriminators.  Each entry
   is a {decl,name} pair.  VAR_DECLs for anon unions get their name
   smashed, so we cannot rely on DECL_NAME.  */

static GTY((deletable)) vec<tree, va_gc> *local_entities;

/* Determine the mangling discriminator of local DECL.  There are
   generally very few of these in any particular function.  Scans
   LOCAL_ENTITIES for a previously recorded entity with the same name,
   code and context; if found, assigns DECL the next discriminator and
   replaces the saved decl, otherwise records DECL for later lookups.  */

void
determine_local_discriminator (tree decl)
{
  bool subtime = timevar_cond_start (TV_NAME_LOOKUP);
  retrofit_lang_decl (decl);
  tree ctx = DECL_CONTEXT (decl);
  /* Unnamed types get a NULL_TREE name key (see comment above about
     anon unions and smashed names).  */
  tree name = (TREE_CODE (decl) == TYPE_DECL
	       && TYPE_UNNAMED_P (TREE_TYPE (decl))
	       ? NULL_TREE : DECL_NAME (decl));
  size_t nelts = vec_safe_length (local_entities);
  for (size_t i = 0; i < nelts; i += 2)
    {
      tree *pair = &(*local_entities)[i];
      tree d = pair[0];
      tree n = pair[1];
      gcc_checking_assert (d != decl);
      if (name == n
	  && TREE_CODE (decl) == TREE_CODE (d)
	  && ctx == DECL_CONTEXT (d))
	{
	  tree disc = integer_one_node;
	  if (DECL_DISCRIMINATOR (d))
	    disc = build_int_cst (TREE_TYPE (disc),
				  TREE_INT_CST_LOW (DECL_DISCRIMINATOR (d)) + 1);
	  DECL_DISCRIMINATOR (decl) = disc;
	  /* Replace the saved decl.  */
	  pair[0] = decl;
	  decl = NULL_TREE;
	  break;
	}
    }

  if (decl)
    {
      vec_safe_reserve (local_entities, 2);
      local_entities->quick_push (decl);
      local_entities->quick_push (name);
    }

  timevar_cond_stop (TV_NAME_LOOKUP, subtime);
}

/* Returns true if functions FN1 and FN2 have equivalent trailing
   requires clauses.  */

static bool
function_requirements_equivalent_p (tree newfn, tree oldfn)
{
  /* In the concepts TS, the combined constraints are compared.  */
  if (cxx_dialect < cxx20)
    {
      tree ci1 = get_constraints (oldfn);
      tree ci2 = get_constraints (newfn);
      tree req1 = ci1 ? CI_ASSOCIATED_CONSTRAINTS (ci1) : NULL_TREE;
      tree req2 = ci2 ? CI_ASSOCIATED_CONSTRAINTS (ci2) : NULL_TREE;
      return cp_tree_equal (req1, req2);
    }

  /* Compare only trailing requirements.  */
  tree reqs1 = get_trailing_function_requirements (newfn);
  tree reqs2 = get_trailing_function_requirements (oldfn);
  if ((reqs1 != NULL_TREE) != (reqs2 != NULL_TREE))
    return false;

  /* Substitution is needed when friends are involved.  */
  reqs1 = maybe_substitute_reqs_for (reqs1, newfn);
  reqs2 = maybe_substitute_reqs_for (reqs2, oldfn);

  return cp_tree_equal (reqs1, reqs2);
}

/* Subroutine of duplicate_decls: return truthvalue of whether
   or not types of these decls match.
For C++, we must compare the parameter list so that `int' can match
   `int&' in a parameter position, but `int&' is not confused with
   `const int&'.  */

int
decls_match (tree newdecl, tree olddecl, bool record_versions /* = true */)
{
  int types_match;

  if (newdecl == olddecl)
    return 1;

  if (TREE_CODE (newdecl) != TREE_CODE (olddecl))
    /* If the two DECLs are not even the same kind of thing, we're not
       interested in their types.  */
    return 0;

  gcc_assert (DECL_P (newdecl));

  if (TREE_CODE (newdecl) == FUNCTION_DECL)
    {
      /* Specializations of different templates are different functions
	 even if they have the same type.  */
      tree t1 = (DECL_USE_TEMPLATE (newdecl)
		 ? DECL_TI_TEMPLATE (newdecl)
		 : NULL_TREE);
      tree t2 = (DECL_USE_TEMPLATE (olddecl)
		 ? DECL_TI_TEMPLATE (olddecl)
		 : NULL_TREE);
      if (t1 != t2)
	return 0;

      if (CP_DECL_CONTEXT (newdecl) != CP_DECL_CONTEXT (olddecl)
	  && ! (DECL_EXTERN_C_P (newdecl)
		&& DECL_EXTERN_C_P (olddecl)))
	return 0;

      /* A new declaration doesn't match a built-in one unless it is
	 also extern "C".  */
      if (DECL_IS_BUILTIN (olddecl)
	  && DECL_EXTERN_C_P (olddecl) && !DECL_EXTERN_C_P (newdecl))
	return 0;

      tree f1 = TREE_TYPE (newdecl);
      tree f2 = TREE_TYPE (olddecl);
      if (TREE_CODE (f1) != TREE_CODE (f2))
	return 0;

      /* A declaration with deduced return type should use its
	 pre-deduction type for declaration matching.  */
      tree r2 = fndecl_declared_return_type (olddecl);
      tree r1 = fndecl_declared_return_type (newdecl);

      tree p1 = TYPE_ARG_TYPES (f1);
      tree p2 = TYPE_ARG_TYPES (f2);

      if (same_type_p (r1, r2))
	{
	  /* Unprototyped extern "C" builtins match if the new parameter
	     list is self-promoting.  */
	  if (!prototype_p (f2) && DECL_EXTERN_C_P (olddecl)
	      && fndecl_built_in_p (olddecl))
	    {
	      types_match = self_promoting_args_p (p1);
	      if (p1 == void_list_node)
		TREE_TYPE (newdecl) = TREE_TYPE (olddecl);
	    }
	  else
	    types_match =
	      compparms (p1, p2)
	      && type_memfn_rqual (f1) == type_memfn_rqual (f2)
	      && (TYPE_ATTRIBUTES (TREE_TYPE (newdecl)) == NULL_TREE
		  || comp_type_attributes (TREE_TYPE (newdecl),
					   TREE_TYPE (olddecl)) != 0);
	}
      else
	types_match = 0;

      /* Two function declarations match if either has a requires-clause
	 then both have a requires-clause and their
	 constraints-expressions are equivalent.  */
      if (types_match && flag_concepts)
	types_match = function_requirements_equivalent_p (newdecl, olddecl);

      /* The decls don't match if they correspond to two different
	 versions of the same function.  Disallow extern "C" functions
	 to be versions for now.  */
      if (types_match
	  && !DECL_EXTERN_C_P (newdecl)
	  && !DECL_EXTERN_C_P (olddecl)
	  && record_versions
	  && maybe_version_functions (newdecl, olddecl,
				      (!DECL_FUNCTION_VERSIONED (newdecl)
				       || !DECL_FUNCTION_VERSIONED (olddecl))))
	return 0;
    }
  else if (TREE_CODE (newdecl) == TEMPLATE_DECL)
    {
      if (!template_heads_equivalent_p (newdecl, olddecl))
	return 0;

      tree oldres = DECL_TEMPLATE_RESULT (olddecl);
      tree newres = DECL_TEMPLATE_RESULT (newdecl);

      if (TREE_CODE (newres) != TREE_CODE (oldres))
	return 0;

      /* Two template types match if they are the same.  Otherwise, compare
	 the underlying declarations.  */
      if (TREE_CODE (newres) == TYPE_DECL)
	types_match = same_type_p (TREE_TYPE (newres), TREE_TYPE (oldres));
      else
	types_match = decls_match (newres, oldres);
    }
  else
    {
      /* Need to check scope for variable declaration (VAR_DECL).
	 For typedef (TYPE_DECL), scope is ignored.  */
      if (VAR_P (newdecl)
	  && CP_DECL_CONTEXT (newdecl) != CP_DECL_CONTEXT (olddecl)
	  /* [dcl.link]
	     Two declarations for an object with C language linkage
	     with the same name (ignoring the namespace that qualify
	     it) that appear in different namespace scopes refer to
	     the same object.  */
	  && !(DECL_EXTERN_C_P (olddecl) && DECL_EXTERN_C_P (newdecl)))
	return 0;

      if (TREE_TYPE (newdecl) == error_mark_node)
	types_match = TREE_TYPE (olddecl) == error_mark_node;
      else if (TREE_TYPE (olddecl) == NULL_TREE)
	types_match = TREE_TYPE (newdecl) == NULL_TREE;
      else if (TREE_TYPE (newdecl) == NULL_TREE)
	types_match = 0;
      else
	types_match = comptypes (TREE_TYPE (newdecl),
				 TREE_TYPE (olddecl),
				 COMPARE_REDECLARATION);
    }

  return types_match;
}

/* NEWDECL and OLDDECL have identical signatures.  If they are
   different versions adjust them and return true.
   If RECORD is set to true, record function versions.  */

bool
maybe_version_functions (tree newdecl, tree olddecl, bool record)
{
  if (!targetm.target_option.function_versions (newdecl, olddecl))
    return false;

  if (!DECL_FUNCTION_VERSIONED (olddecl))
    {
      DECL_FUNCTION_VERSIONED (olddecl) = 1;
      if (DECL_ASSEMBLER_NAME_SET_P (olddecl))
	mangle_decl (olddecl);
    }

  if (!DECL_FUNCTION_VERSIONED (newdecl))
    {
      DECL_FUNCTION_VERSIONED (newdecl) = 1;
      if (DECL_ASSEMBLER_NAME_SET_P (newdecl))
	mangle_decl (newdecl);
    }

  if (record)
    cgraph_node::record_function_versions (olddecl, newdecl);

  return true;
}

/* If NEWDECL is `static' and an `extern' was seen previously,
   warn about it.  OLDDECL is the previous declaration.

   Note that this does not apply to the C++ case of declaring
   a variable `extern const' and then later `const'.

   Don't complain about built-in functions, since they are beyond
   the user's control.
*/

void
warn_extern_redeclared_static (tree newdecl, tree olddecl)
{
  if (TREE_CODE (newdecl) == TYPE_DECL
      || TREE_CODE (newdecl) == TEMPLATE_DECL
      || TREE_CODE (newdecl) == CONST_DECL
      || TREE_CODE (newdecl) == NAMESPACE_DECL)
    return;

  /* Don't get confused by static member functions; that's a different
     use of `static'.  */
  if (TREE_CODE (newdecl) == FUNCTION_DECL
      && DECL_STATIC_FUNCTION_P (newdecl))
    return;

  /* If the old declaration was `static', or the new one isn't, then
     everything is OK.  */
  if (DECL_THIS_STATIC (olddecl) || !DECL_THIS_STATIC (newdecl))
    return;

  /* It's OK to declare a builtin function as `static'.  */
  if (TREE_CODE (olddecl) == FUNCTION_DECL
      && DECL_ARTIFICIAL (olddecl))
    return;

  auto_diagnostic_group d;
  if (permerror (DECL_SOURCE_LOCATION (newdecl),
		 "%qD was declared %<extern%> and later %<static%>", newdecl))
    inform (DECL_SOURCE_LOCATION (olddecl),
	    "previous declaration of %qD", olddecl);
}

/* NEW_DECL is a redeclaration of OLD_DECL; both are functions or
   function templates.  If their exception specifications do not
   match, issue a diagnostic.  */

static void
check_redeclaration_exception_specification (tree new_decl,
					     tree old_decl)
{
  tree new_exceptions = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (new_decl));
  tree old_exceptions = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (old_decl));

  /* Two default specs are equivalent, don't force evaluation.  */
  if (UNEVALUATED_NOEXCEPT_SPEC_P (new_exceptions)
      && UNEVALUATED_NOEXCEPT_SPEC_P (old_exceptions))
    return;

  /* Instantiate any deferred noexcept-specifiers before comparing,
     then re-fetch the (possibly updated) specs.  */
  if (!type_dependent_expression_p (old_decl))
    {
      maybe_instantiate_noexcept (new_decl);
      maybe_instantiate_noexcept (old_decl);
    }
  new_exceptions = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (new_decl));
  old_exceptions = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (old_decl));

  /* [except.spec]

     If any declaration of a function has an exception-specification,
     all declarations, including the definition and an explicit
     specialization, of that function shall have an
     exception-specification with the same set of type-ids.  */
  if (!DECL_IS_BUILTIN (old_decl)
      && !comp_except_specs (new_exceptions, old_exceptions, ce_normal))
    {
      const char *const msg
	= G_("declaration of %qF has a different exception specifier");
      bool complained = true;
      location_t new_loc = DECL_SOURCE_LOCATION (new_decl);
      auto_diagnostic_group d;
      if (DECL_IN_SYSTEM_HEADER (old_decl))
	complained = pedwarn (new_loc, OPT_Wsystem_headers, msg, new_decl);
      else if (!flag_exceptions)
	/* We used to silently permit mismatched eh specs with
	   -fno-exceptions, so make them a pedwarn now.  */
	complained = pedwarn (new_loc, OPT_Wpedantic, msg, new_decl);
      else
	error_at (new_loc, msg, new_decl);
      if (complained)
	inform (DECL_SOURCE_LOCATION (old_decl),
		"from previous declaration %qF", old_decl);
    }
}

/* Return true if OLD_DECL and NEW_DECL agree on constexprness.
   Otherwise issue diagnostics.  */

static bool
validate_constexpr_redeclaration (tree old_decl, tree new_decl)
{
  old_decl = STRIP_TEMPLATE (old_decl);
  new_decl = STRIP_TEMPLATE (new_decl);
  if (!VAR_OR_FUNCTION_DECL_P (old_decl)
      || !VAR_OR_FUNCTION_DECL_P (new_decl))
    return true;
  if (DECL_DECLARED_CONSTEXPR_P (old_decl)
      == DECL_DECLARED_CONSTEXPR_P (new_decl))
    {
      if (TREE_CODE (old_decl) != FUNCTION_DECL)
	return true;
      if (DECL_IMMEDIATE_FUNCTION_P (old_decl)
	  == DECL_IMMEDIATE_FUNCTION_P (new_decl))
	return true;
    }
  if (TREE_CODE (old_decl) == FUNCTION_DECL)
    {
      if (fndecl_built_in_p (old_decl))
	{
	  /* Hide a built-in declaration.  */
	  DECL_DECLARED_CONSTEXPR_P (old_decl)
	    = DECL_DECLARED_CONSTEXPR_P (new_decl);
	  if (DECL_IMMEDIATE_FUNCTION_P (new_decl))
	    SET_DECL_IMMEDIATE_FUNCTION_P (old_decl);
	  return true;
	}
      /* 7.1.5 [dcl.constexpr]

	 Note: An explicit specialization can differ from the template
	 declaration with respect to the constexpr specifier.  */
      if (!DECL_TEMPLATE_SPECIALIZATION (old_decl)
	  && DECL_TEMPLATE_SPECIALIZATION (new_decl))
	return true;

      const char *kind = "constexpr";
      if (DECL_IMMEDIATE_FUNCTION_P (old_decl)
	  || DECL_IMMEDIATE_FUNCTION_P (new_decl))
	kind = "consteval";
      error_at (DECL_SOURCE_LOCATION (new_decl),
		"redeclaration %qD differs in %qs "
		"from previous declaration", new_decl,
		kind);
      inform (DECL_SOURCE_LOCATION (old_decl),
	      "previous declaration %qD", old_decl);
      return false;
    }
  return true;
}

// If OLDDECL and NEWDECL are concept declarations with the same type
// (i.e., and template parameters), but different requirements,
// emit diagnostics and return true. Otherwise, return false.
static inline bool
check_concept_refinement (tree olddecl, tree newdecl)
{
  if (!DECL_DECLARED_CONCEPT_P (olddecl) || !DECL_DECLARED_CONCEPT_P (newdecl))
    return false;

  tree d1 = DECL_TEMPLATE_RESULT (olddecl);
  tree d2 = DECL_TEMPLATE_RESULT (newdecl);
  if (TREE_CODE (d1) != TREE_CODE (d2))
    return false;

  tree t1 = TREE_TYPE (d1);
  tree t2 = TREE_TYPE (d2);
  if (TREE_CODE (d1) == FUNCTION_DECL)
    {
      if (compparms (TYPE_ARG_TYPES (t1), TYPE_ARG_TYPES (t2))
	  && comp_template_parms (DECL_TEMPLATE_PARMS (olddecl),
				  DECL_TEMPLATE_PARMS (newdecl))
	  && !equivalently_constrained (olddecl, newdecl))
	{
	  error ("cannot specialize concept %q#D", olddecl);
	  return true;
	}
    }
  return false;
}

/* DECL is a redeclaration of a function or function template.  If
   it does have default arguments issue a diagnostic.  Note: this
   function is used to enforce the requirements in C++11 8.3.6 about
   no default arguments in redeclarations.  */

static void
check_redeclaration_no_default_args (tree decl)
{
  gcc_assert (DECL_DECLARES_FUNCTION_P (decl));

  for (tree t = FUNCTION_FIRST_USER_PARMTYPE (decl);
       t && t != void_list_node; t = TREE_CHAIN (t))
    if (TREE_PURPOSE (t))
      {
	permerror (DECL_SOURCE_LOCATION (decl),
		   "redeclaration of %q#D may not have default "
		   "arguments", decl);
	return;
      }
}

/* NEWDECL is a redeclaration of a function or function template OLDDECL,
   in any case represented as FUNCTION_DECLs (the DECL_TEMPLATE_RESULTs of
   the TEMPLATE_DECLs in case of function templates).  This function is used
   to enforce the final part of C++17 11.3.6/4, about a single declaration:
   "If a friend declaration specifies a default argument expression, that
   declaration shall be a definition and shall be the only declaration of
   the function or function template in the translation unit."  */

static void
check_no_redeclaration_friend_default_args (tree olddecl, tree newdecl)
{
  if (!DECL_UNIQUE_FRIEND_P (olddecl) && !DECL_UNIQUE_FRIEND_P (newdecl))
    return;

  /* Walk both parameter-type lists in lockstep; flag a default argument
     (TREE_PURPOSE) on whichever side is the friend declaration.  */
  for (tree t1 = FUNCTION_FIRST_USER_PARMTYPE (olddecl),
	 t2 = FUNCTION_FIRST_USER_PARMTYPE (newdecl);
       t1 && t1 != void_list_node;
       t1 = TREE_CHAIN (t1), t2 = TREE_CHAIN (t2))
    if ((DECL_UNIQUE_FRIEND_P (olddecl) && TREE_PURPOSE (t1))
	|| (DECL_UNIQUE_FRIEND_P (newdecl) && TREE_PURPOSE (t2)))
      {
	auto_diagnostic_group d;
	if (permerror (DECL_SOURCE_LOCATION (newdecl),
		       "friend declaration of %q#D specifies default "
		       "arguments and isn%'t the only declaration", newdecl))
	  inform (DECL_SOURCE_LOCATION (olddecl),
		  "previous declaration of %q#D", olddecl);
	return;
      }
}

/* Merge tree bits that correspond to attributes noreturn, nothrow,
   const, malloc, and pure from NEWDECL with those of OLDDECL.
*/ static void merge_attribute_bits (tree newdecl, tree olddecl) { TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl); TREE_THIS_VOLATILE (olddecl) |= TREE_THIS_VOLATILE (newdecl); TREE_NOTHROW (newdecl) |= TREE_NOTHROW (olddecl); TREE_NOTHROW (olddecl) |= TREE_NOTHROW (newdecl); TREE_READONLY (newdecl) |= TREE_READONLY (olddecl); TREE_READONLY (olddecl) |= TREE_READONLY (newdecl); DECL_IS_MALLOC (newdecl) |= DECL_IS_MALLOC (olddecl); DECL_IS_MALLOC (olddecl) |= DECL_IS_MALLOC (newdecl); DECL_PURE_P (newdecl) |= DECL_PURE_P (olddecl); DECL_PURE_P (olddecl) |= DECL_PURE_P (newdecl); DECL_UNINLINABLE (newdecl) |= DECL_UNINLINABLE (olddecl); DECL_UNINLINABLE (olddecl) |= DECL_UNINLINABLE (newdecl); } #define GNU_INLINE_P(fn) (DECL_DECLARED_INLINE_P (fn) \ && lookup_attribute ("gnu_inline", \ DECL_ATTRIBUTES (fn))) /* A subroutine of duplicate_decls. Emits a diagnostic when newdecl ambiguates olddecl. Returns true if an error occurs. */ static bool duplicate_function_template_decls (tree newdecl, tree olddecl) { tree newres = DECL_TEMPLATE_RESULT (newdecl); tree oldres = DECL_TEMPLATE_RESULT (olddecl); /* Function template declarations can be differentiated by parameter and return type. */ if (compparms (TYPE_ARG_TYPES (TREE_TYPE (oldres)), TYPE_ARG_TYPES (TREE_TYPE (newres))) && same_type_p (TREE_TYPE (TREE_TYPE (newdecl)), TREE_TYPE (TREE_TYPE (olddecl)))) { /* ... and also by their template-heads and requires-clauses. */ if (template_heads_equivalent_p (newdecl, olddecl) && function_requirements_equivalent_p (newres, oldres)) { error ("ambiguating new declaration %q+#D", newdecl); inform (DECL_SOURCE_LOCATION (olddecl), "old declaration %q#D", olddecl); return true; } /* FIXME: The types are the same but the are differences in either the template heads or function requirements. We should be able to diagnose a set of common errors stemming from these declarations. For example: template<typename T> requires C void f(...); template<typename T> void f(...) 
requires C; These are functionally equivalent but not equivalent. */ } return false; } /* If NEWDECL is a redeclaration of OLDDECL, merge the declarations. If the redeclaration is invalid, a diagnostic is issued, and the error_mark_node is returned. Otherwise, OLDDECL is returned. If NEWDECL is not a redeclaration of OLDDECL, NULL_TREE is returned. HIDING is true if the new decl is being hidden. WAS_HIDDEN is true if the old decl was hidden. Hidden decls can be anticipated builtins, injected friends, or (coming soon) injected from a local-extern decl. */ tree duplicate_decls (tree newdecl, tree olddecl, bool hiding, bool was_hidden) { unsigned olddecl_uid = DECL_UID (olddecl); int types_match = 0; int new_defines_function = 0; tree new_template_info; location_t olddecl_loc = DECL_SOURCE_LOCATION (olddecl); location_t newdecl_loc = DECL_SOURCE_LOCATION (newdecl); if (newdecl == olddecl) return olddecl; types_match = decls_match (newdecl, olddecl); /* If either the type of the new decl or the type of the old decl is an error_mark_node, then that implies that we have already issued an error (earlier) for some bogus type specification, and in that case, it is rather pointless to harass the user with yet more error message about the same declaration, so just pretend the types match here. */ if (TREE_TYPE (newdecl) == error_mark_node || TREE_TYPE (olddecl) == error_mark_node) return error_mark_node; /* Check for redeclaration and other discrepancies. */ if (TREE_CODE (olddecl) == FUNCTION_DECL && DECL_UNDECLARED_BUILTIN_P (olddecl)) { if (TREE_CODE (newdecl) != FUNCTION_DECL) { /* Avoid warnings redeclaring built-ins which have not been explicitly declared. 
*/ if (was_hidden) { if (TREE_PUBLIC (newdecl) && CP_DECL_CONTEXT (newdecl) == global_namespace) warning_at (newdecl_loc, OPT_Wbuiltin_declaration_mismatch, "built-in function %qD declared as non-function", newdecl); return NULL_TREE; } /* If you declare a built-in or predefined function name as static, the old definition is overridden, but optionally warn this was a bad choice of name. */ if (! TREE_PUBLIC (newdecl)) { warning_at (newdecl_loc, OPT_Wshadow, fndecl_built_in_p (olddecl) ? G_("shadowing built-in function %q#D") : G_("shadowing library function %q#D"), olddecl); /* Discard the old built-in function. */ return NULL_TREE; } /* If the built-in is not ansi, then programs can override it even globally without an error. */ else if (! fndecl_built_in_p (olddecl)) warning_at (newdecl_loc, 0, "library function %q#D redeclared as non-function %q#D", olddecl, newdecl); else error_at (newdecl_loc, "declaration of %q#D conflicts with built-in " "declaration %q#D", newdecl, olddecl); return NULL_TREE; } else if (!types_match) { /* Avoid warnings redeclaring built-ins which have not been explicitly declared. */ if (was_hidden) { tree t1, t2; /* A new declaration doesn't match a built-in one unless it is also extern "C". */ gcc_assert (DECL_IS_BUILTIN (olddecl)); gcc_assert (DECL_EXTERN_C_P (olddecl)); if (!DECL_EXTERN_C_P (newdecl)) return NULL_TREE; for (t1 = TYPE_ARG_TYPES (TREE_TYPE (newdecl)), t2 = TYPE_ARG_TYPES (TREE_TYPE (olddecl)); t1 || t2; t1 = TREE_CHAIN (t1), t2 = TREE_CHAIN (t2)) { if (!t1 || !t2) break; /* FILE, tm types are not known at the time we create the builtins. 
*/ for (unsigned i = 0; i < sizeof (builtin_structptr_types) / sizeof (builtin_structptr_type); ++i) if (TREE_VALUE (t2) == builtin_structptr_types[i].node) { tree t = TREE_VALUE (t1); if (TYPE_PTR_P (t) && TYPE_IDENTIFIER (TREE_TYPE (t)) == get_identifier (builtin_structptr_types[i].str) && compparms (TREE_CHAIN (t1), TREE_CHAIN (t2))) { tree oldargs = TYPE_ARG_TYPES (TREE_TYPE (olddecl)); TYPE_ARG_TYPES (TREE_TYPE (olddecl)) = TYPE_ARG_TYPES (TREE_TYPE (newdecl)); types_match = decls_match (newdecl, olddecl); if (types_match) return duplicate_decls (newdecl, olddecl, hiding, was_hidden); TYPE_ARG_TYPES (TREE_TYPE (olddecl)) = oldargs; } goto next_arg; } if (! same_type_p (TREE_VALUE (t1), TREE_VALUE (t2))) break; next_arg:; } warning_at (newdecl_loc, OPT_Wbuiltin_declaration_mismatch, "declaration of %q#D conflicts with built-in " "declaration %q#D", newdecl, olddecl); } else if ((DECL_EXTERN_C_P (newdecl) && DECL_EXTERN_C_P (olddecl)) || compparms (TYPE_ARG_TYPES (TREE_TYPE (newdecl)), TYPE_ARG_TYPES (TREE_TYPE (olddecl)))) { /* Don't really override olddecl for __* prefixed builtins except for __[^b]*_chk, the compiler might be using those explicitly. */ if (fndecl_built_in_p (olddecl)) { tree id = DECL_NAME (olddecl); const char *name = IDENTIFIER_POINTER (id); size_t len; if (name[0] == '_' && name[1] == '_' && (strncmp (name + 2, "builtin_", strlen ("builtin_")) == 0 || (len = strlen (name)) <= strlen ("___chk") || memcmp (name + len - strlen ("_chk"), "_chk", strlen ("_chk") + 1) != 0)) { if (DECL_INITIAL (newdecl)) { error_at (newdecl_loc, "definition of %q#D ambiguates built-in " "declaration %q#D", newdecl, olddecl); return error_mark_node; } auto_diagnostic_group d; if (permerror (newdecl_loc, "new declaration %q#D ambiguates built-in" " declaration %q#D", newdecl, olddecl) && flag_permissive) inform (newdecl_loc, "ignoring the %q#D declaration", newdecl); return flag_permissive ? olddecl : error_mark_node; } } /* A near match; override the builtin. 
*/ if (TREE_PUBLIC (newdecl)) warning_at (newdecl_loc, OPT_Wbuiltin_declaration_mismatch, "new declaration %q#D ambiguates built-in " "declaration %q#D", newdecl, olddecl); else warning (OPT_Wshadow, fndecl_built_in_p (olddecl) ? G_("shadowing built-in function %q#D") : G_("shadowing library function %q#D"), olddecl); } else /* Discard the old built-in function. */ return NULL_TREE; /* Replace the old RTL to avoid problems with inlining. */ COPY_DECL_RTL (newdecl, olddecl); } /* Even if the types match, prefer the new declarations type for built-ins which have not been explicitly declared, for exception lists, etc... */ else if (DECL_IS_BUILTIN (olddecl)) { tree type = TREE_TYPE (newdecl); tree attribs = (*targetm.merge_type_attributes) (TREE_TYPE (olddecl), type); type = cp_build_type_attribute_variant (type, attribs); TREE_TYPE (newdecl) = TREE_TYPE (olddecl) = type; } /* If a function is explicitly declared "throw ()", propagate that to the corresponding builtin. */ if (DECL_BUILT_IN_CLASS (olddecl) == BUILT_IN_NORMAL && was_hidden && TREE_NOTHROW (newdecl) && !TREE_NOTHROW (olddecl)) { enum built_in_function fncode = DECL_FUNCTION_CODE (olddecl); tree tmpdecl = builtin_decl_explicit (fncode); if (tmpdecl && tmpdecl != olddecl && types_match) TREE_NOTHROW (tmpdecl) = 1; } /* Whether or not the builtin can throw exceptions has no bearing on this declarator. */ TREE_NOTHROW (olddecl) = 0; if (DECL_THIS_STATIC (newdecl) && !DECL_THIS_STATIC (olddecl)) { /* If a builtin function is redeclared as `static', merge the declarations, but make the original one static. */ DECL_THIS_STATIC (olddecl) = 1; TREE_PUBLIC (olddecl) = 0; /* Make the old declaration consistent with the new one so that all remnants of the builtin-ness of this function will be banished. 
*/ SET_DECL_LANGUAGE (olddecl, DECL_LANGUAGE (newdecl)); COPY_DECL_RTL (newdecl, olddecl); } } else if (TREE_CODE (olddecl) != TREE_CODE (newdecl)) { /* C++ Standard, 3.3, clause 4: "[Note: a namespace name or a class template name must be unique in its declarative region (7.3.2, clause 14). ]" */ if (TREE_CODE (olddecl) == NAMESPACE_DECL || TREE_CODE (newdecl) == NAMESPACE_DECL) /* Namespace conflicts with not namespace. */; else if (DECL_TYPE_TEMPLATE_P (olddecl) || DECL_TYPE_TEMPLATE_P (newdecl)) /* Class template conflicts. */; else if ((TREE_CODE (olddecl) == TEMPLATE_DECL && DECL_TEMPLATE_RESULT (olddecl) && TREE_CODE (DECL_TEMPLATE_RESULT (olddecl)) == VAR_DECL) || (TREE_CODE (newdecl) == TEMPLATE_DECL && DECL_TEMPLATE_RESULT (newdecl) && TREE_CODE (DECL_TEMPLATE_RESULT (newdecl)) == VAR_DECL)) /* Variable template conflicts. */; else if (concept_definition_p (olddecl) || concept_definition_p (newdecl)) /* Concept conflicts. */; else if ((TREE_CODE (newdecl) == FUNCTION_DECL && DECL_FUNCTION_TEMPLATE_P (olddecl)) || (TREE_CODE (olddecl) == FUNCTION_DECL && DECL_FUNCTION_TEMPLATE_P (newdecl))) { /* One is a function and the other is a template function. */ if (!UDLIT_OPER_P (DECL_NAME (newdecl))) return NULL_TREE; /* There can only be one! */ if (TREE_CODE (newdecl) == TEMPLATE_DECL && check_raw_literal_operator (olddecl)) error_at (newdecl_loc, "literal operator %q#D conflicts with" " raw literal operator", newdecl); else if (check_raw_literal_operator (newdecl)) error_at (newdecl_loc, "raw literal operator %q#D conflicts with" " literal operator template", newdecl); else return NULL_TREE; inform (olddecl_loc, "previous declaration %q#D", olddecl); return error_mark_node; } else if ((VAR_P (olddecl) && DECL_DECOMPOSITION_P (olddecl)) || (VAR_P (newdecl) && DECL_DECOMPOSITION_P (newdecl))) /* A structured binding must be unique in its declarative region. 
*/; else if (DECL_IMPLICIT_TYPEDEF_P (olddecl) || DECL_IMPLICIT_TYPEDEF_P (newdecl)) /* One is an implicit typedef, that's ok. */ return NULL_TREE; error ("%q#D redeclared as different kind of entity", newdecl); inform (olddecl_loc, "previous declaration %q#D", olddecl); return error_mark_node; } else if (!types_match) { if (CP_DECL_CONTEXT (newdecl) != CP_DECL_CONTEXT (olddecl)) /* These are certainly not duplicate declarations; they're from different scopes. */ return NULL_TREE; if (TREE_CODE (newdecl) == TEMPLATE_DECL) { tree oldres = DECL_TEMPLATE_RESULT (olddecl); tree newres = DECL_TEMPLATE_RESULT (newdecl); /* The name of a class template may not be declared to refer to any other template, class, function, object, namespace, value, or type in the same scope. */ if (TREE_CODE (oldres) == TYPE_DECL || TREE_CODE (newres) == TYPE_DECL) { error_at (newdecl_loc, "conflicting declaration of template %q#D", newdecl); inform (olddecl_loc, "previous declaration %q#D", olddecl); return error_mark_node; } else if (TREE_CODE (oldres) == FUNCTION_DECL && TREE_CODE (newres) == FUNCTION_DECL) { if (duplicate_function_template_decls (newdecl, olddecl)) return error_mark_node; return NULL_TREE; } else if (check_concept_refinement (olddecl, newdecl)) return error_mark_node; return NULL_TREE; } if (TREE_CODE (newdecl) == FUNCTION_DECL) { if (DECL_EXTERN_C_P (newdecl) && DECL_EXTERN_C_P (olddecl)) { error_at (newdecl_loc, "conflicting declaration of C function %q#D", newdecl); inform (olddecl_loc, "previous declaration %q#D", olddecl); return NULL_TREE; } /* For function versions, params and types match, but they are not ambiguous. */ else if ((!DECL_FUNCTION_VERSIONED (newdecl) && !DECL_FUNCTION_VERSIONED (olddecl)) // The functions have the same parameter types. && compparms (TYPE_ARG_TYPES (TREE_TYPE (newdecl)), TYPE_ARG_TYPES (TREE_TYPE (olddecl))) // And the same constraints. 
&& equivalently_constrained (newdecl, olddecl)) { error_at (newdecl_loc, "ambiguating new declaration of %q#D", newdecl); inform (olddecl_loc, "old declaration %q#D", olddecl); return error_mark_node; } else return NULL_TREE; } else { error_at (newdecl_loc, "conflicting declaration %q#D", newdecl); inform (olddecl_loc, "previous declaration as %q#D", olddecl); return error_mark_node; } } else if (TREE_CODE (newdecl) == FUNCTION_DECL && DECL_OMP_DECLARE_REDUCTION_P (newdecl)) { /* OMP UDRs are never duplicates. */ gcc_assert (DECL_OMP_DECLARE_REDUCTION_P (olddecl)); error_at (newdecl_loc, "redeclaration of %<pragma omp declare reduction%>"); inform (olddecl_loc, "previous %<pragma omp declare reduction%> declaration"); return error_mark_node; } else if (TREE_CODE (newdecl) == FUNCTION_DECL && ((DECL_TEMPLATE_SPECIALIZATION (olddecl) && (!DECL_TEMPLATE_INFO (newdecl) || (DECL_TI_TEMPLATE (newdecl) != DECL_TI_TEMPLATE (olddecl)))) || (DECL_TEMPLATE_SPECIALIZATION (newdecl) && (!DECL_TEMPLATE_INFO (olddecl) || (DECL_TI_TEMPLATE (olddecl) != DECL_TI_TEMPLATE (newdecl)))))) /* It's OK to have a template specialization and a non-template with the same type, or to have specializations of two different templates with the same type. Note that if one is a specialization, and the other is an instantiation of the same template, that we do not exit at this point. That situation can occur if we instantiate a template class, and then specialize one of its methods. This situation is valid, but the declarations must be merged in the usual way. */ return NULL_TREE; else if (TREE_CODE (newdecl) == FUNCTION_DECL && ((DECL_TEMPLATE_INSTANTIATION (olddecl) && !DECL_USE_TEMPLATE (newdecl)) || (DECL_TEMPLATE_INSTANTIATION (newdecl) && !DECL_USE_TEMPLATE (olddecl)))) /* One of the declarations is a template instantiation, and the other is not a template at all. That's OK. 
*/ return NULL_TREE; else if (TREE_CODE (newdecl) == NAMESPACE_DECL) { /* In [namespace.alias] we have: In a declarative region, a namespace-alias-definition can be used to redefine a namespace-alias declared in that declarative region to refer only to the namespace to which it already refers. Therefore, if we encounter a second alias directive for the same alias, we can just ignore the second directive. */ if (DECL_NAMESPACE_ALIAS (newdecl) && (DECL_NAMESPACE_ALIAS (newdecl) == DECL_NAMESPACE_ALIAS (olddecl))) return olddecl; /* Leave it to update_binding to merge or report error. */ return NULL_TREE; } else { const char *errmsg = redeclaration_error_message (newdecl, olddecl); if (errmsg) { auto_diagnostic_group d; error_at (newdecl_loc, errmsg, newdecl); if (DECL_NAME (olddecl) != NULL_TREE) inform (olddecl_loc, (DECL_INITIAL (olddecl) && namespace_bindings_p ()) ? G_("%q#D previously defined here") : G_("%q#D previously declared here"), olddecl); return error_mark_node; } else if (TREE_CODE (olddecl) == FUNCTION_DECL && DECL_INITIAL (olddecl) != NULL_TREE && !prototype_p (TREE_TYPE (olddecl)) && prototype_p (TREE_TYPE (newdecl))) { /* Prototype decl follows defn w/o prototype. */ auto_diagnostic_group d; if (warning_at (newdecl_loc, 0, "prototype specified for %q#D", newdecl)) inform (olddecl_loc, "previous non-prototype definition here"); } else if (VAR_OR_FUNCTION_DECL_P (olddecl) && DECL_LANGUAGE (newdecl) != DECL_LANGUAGE (olddecl)) { /* [dcl.link] If two declarations of the same function or object specify different linkage-specifications ..., the program is ill-formed.... Except for functions with C++ linkage, a function declaration without a linkage specification shall not precede the first linkage specification for that function. A function can be declared without a linkage specification after an explicit linkage specification has been seen; the linkage explicitly specified in the earlier declaration is not affected by such a function declaration. 
DR 563 raises the question why the restrictions on functions should not also apply to objects. Older versions of G++ silently ignore the linkage-specification for this example: namespace N { extern int i; extern "C" int i; } which is clearly wrong. Therefore, we now treat objects like functions. */ if (current_lang_depth () == 0) { /* There is no explicit linkage-specification, so we use the linkage from the previous declaration. */ retrofit_lang_decl (newdecl); SET_DECL_LANGUAGE (newdecl, DECL_LANGUAGE (olddecl)); } else { auto_diagnostic_group d; error_at (newdecl_loc, "conflicting declaration of %q#D with %qL linkage", newdecl, DECL_LANGUAGE (newdecl)); inform (olddecl_loc, "previous declaration with %qL linkage", DECL_LANGUAGE (olddecl)); } } if (DECL_LANG_SPECIFIC (olddecl) && DECL_USE_TEMPLATE (olddecl)) ; else if (TREE_CODE (olddecl) == FUNCTION_DECL) { /* Note: free functions, as TEMPLATE_DECLs, are handled below. */ if (DECL_FUNCTION_MEMBER_P (olddecl) && (/* grokfndecl passes member function templates too as FUNCTION_DECLs. */ DECL_TEMPLATE_INFO (olddecl) /* C++11 8.3.6/6. Default arguments for a member function of a class template shall be specified on the initial declaration of the member function within the class template. 
*/ || CLASSTYPE_TEMPLATE_INFO (CP_DECL_CONTEXT (olddecl)))) check_redeclaration_no_default_args (newdecl); else { tree t1 = FUNCTION_FIRST_USER_PARMTYPE (olddecl); tree t2 = FUNCTION_FIRST_USER_PARMTYPE (newdecl); int i = 1; for (; t1 && t1 != void_list_node; t1 = TREE_CHAIN (t1), t2 = TREE_CHAIN (t2), i++) if (TREE_PURPOSE (t1) && TREE_PURPOSE (t2)) { if (simple_cst_equal (TREE_PURPOSE (t1), TREE_PURPOSE (t2)) == 1) { auto_diagnostic_group d; if (permerror (newdecl_loc, "default argument given for parameter " "%d of %q#D", i, newdecl)) inform (olddecl_loc, "previous specification in %q#D here", olddecl); } else { auto_diagnostic_group d; error_at (newdecl_loc, "default argument given for parameter %d " "of %q#D", i, newdecl); inform (olddecl_loc, "previous specification in %q#D here", olddecl); } } /* C++17 11.3.6/4: "If a friend declaration specifies a default argument expression, that declaration... shall be the only declaration of the function or function template in the translation unit." */ check_no_redeclaration_friend_default_args (olddecl, newdecl); } } } /* Do not merge an implicit typedef with an explicit one. In: class A; ... typedef class A A __attribute__ ((foo)); the attribute should apply only to the typedef. */ if (TREE_CODE (olddecl) == TYPE_DECL && (DECL_IMPLICIT_TYPEDEF_P (olddecl) || DECL_IMPLICIT_TYPEDEF_P (newdecl))) return NULL_TREE; if (!validate_constexpr_redeclaration (olddecl, newdecl)) return error_mark_node; /* We have committed to returning OLDDECL at this point. */ /* If new decl is `static' and an `extern' was seen previously, warn about it. */ warn_extern_redeclared_static (newdecl, olddecl); /* True to merge attributes between the declarations, false to set OLDDECL's attributes to those of NEWDECL (for template explicit specializations that specify their own attributes independent of those specified for the primary template). 
*/ const bool merge_attr = (TREE_CODE (newdecl) != FUNCTION_DECL || !DECL_TEMPLATE_SPECIALIZATION (newdecl) || DECL_TEMPLATE_SPECIALIZATION (olddecl)); if (TREE_CODE (newdecl) == FUNCTION_DECL) { if (merge_attr) { if (diagnose_mismatched_attributes (olddecl, newdecl)) inform (olddecl_loc, DECL_INITIAL (olddecl) ? G_("previous definition of %qD here") : G_("previous declaration of %qD here"), olddecl); /* [dcl.attr.noreturn]: The first declaration of a function shall specify the noreturn attribute if any declaration of that function specifies the noreturn attribute. */ tree a; if (TREE_THIS_VOLATILE (newdecl) && !TREE_THIS_VOLATILE (olddecl) /* This applies to [[noreturn]] only, not its GNU variants. */ && (a = lookup_attribute ("noreturn", DECL_ATTRIBUTES (newdecl))) && cxx11_attribute_p (a) && get_attribute_namespace (a) == NULL_TREE) { error_at (newdecl_loc, "function %qD declared %<[[noreturn]]%> " "but its first declaration was not", newdecl); inform (olddecl_loc, "previous declaration of %qD", olddecl); } } /* Now that functions must hold information normally held by field decls, there is extra work to do so that declaration information does not get destroyed during definition. 
*/ if (DECL_VINDEX (olddecl)) DECL_VINDEX (newdecl) = DECL_VINDEX (olddecl); if (DECL_CONTEXT (olddecl)) DECL_CONTEXT (newdecl) = DECL_CONTEXT (olddecl); DECL_STATIC_CONSTRUCTOR (newdecl) |= DECL_STATIC_CONSTRUCTOR (olddecl); DECL_STATIC_DESTRUCTOR (newdecl) |= DECL_STATIC_DESTRUCTOR (olddecl); DECL_PURE_VIRTUAL_P (newdecl) |= DECL_PURE_VIRTUAL_P (olddecl); DECL_VIRTUAL_P (newdecl) |= DECL_VIRTUAL_P (olddecl); DECL_INVALID_OVERRIDER_P (newdecl) |= DECL_INVALID_OVERRIDER_P (olddecl); DECL_FINAL_P (newdecl) |= DECL_FINAL_P (olddecl); DECL_OVERRIDE_P (newdecl) |= DECL_OVERRIDE_P (olddecl); DECL_THIS_STATIC (newdecl) |= DECL_THIS_STATIC (olddecl); DECL_HAS_DEPENDENT_EXPLICIT_SPEC_P (newdecl) |= DECL_HAS_DEPENDENT_EXPLICIT_SPEC_P (olddecl); if (DECL_OVERLOADED_OPERATOR_P (olddecl)) DECL_OVERLOADED_OPERATOR_CODE_RAW (newdecl) = DECL_OVERLOADED_OPERATOR_CODE_RAW (olddecl); new_defines_function = DECL_INITIAL (newdecl) != NULL_TREE; /* Optionally warn about more than one declaration for the same name, but don't warn about a function declaration followed by a definition. */ if (warn_redundant_decls && ! DECL_ARTIFICIAL (olddecl) && !(new_defines_function && DECL_INITIAL (olddecl) == NULL_TREE) /* Don't warn about extern decl followed by definition. */ && !(DECL_EXTERNAL (olddecl) && ! DECL_EXTERNAL (newdecl)) /* Don't warn if at least one is/was hidden. */ && !(hiding || was_hidden) /* Don't warn about declaration followed by specialization. */ && (! DECL_TEMPLATE_SPECIALIZATION (newdecl) || DECL_TEMPLATE_SPECIALIZATION (olddecl))) { auto_diagnostic_group d; if (warning_at (newdecl_loc, OPT_Wredundant_decls, "redundant redeclaration of %qD in same scope", newdecl)) inform (olddecl_loc, "previous declaration of %qD", olddecl); } /* [dcl.fct.def.delete] A deleted definition of a function shall be the first declaration of the function or, for an explicit specialization of a function template, the first declaration of that specialization. 
*/ if (!(DECL_TEMPLATE_INSTANTIATION (olddecl) && DECL_TEMPLATE_SPECIALIZATION (newdecl))) { if (DECL_DELETED_FN (newdecl)) { auto_diagnostic_group d; pedwarn (newdecl_loc, OPT_Wpedantic, "deleted definition of %qD is not first declaration", newdecl); inform (olddecl_loc, "previous declaration of %qD", olddecl); } DECL_DELETED_FN (newdecl) |= DECL_DELETED_FN (olddecl); } } /* Deal with C++: must preserve virtual function table size. */ if (TREE_CODE (olddecl) == TYPE_DECL) { tree newtype = TREE_TYPE (newdecl); tree oldtype = TREE_TYPE (olddecl); if (newtype != error_mark_node && oldtype != error_mark_node && TYPE_LANG_SPECIFIC (newtype) && TYPE_LANG_SPECIFIC (oldtype)) CLASSTYPE_FRIEND_CLASSES (newtype) = CLASSTYPE_FRIEND_CLASSES (oldtype); DECL_ORIGINAL_TYPE (newdecl) = DECL_ORIGINAL_TYPE (olddecl); } /* Copy all the DECL_... slots specified in the new decl except for any that we copy here from the old type. */ if (merge_attr) DECL_ATTRIBUTES (newdecl) = (*targetm.merge_decl_attributes) (olddecl, newdecl); else DECL_ATTRIBUTES (olddecl) = DECL_ATTRIBUTES (newdecl); if (TREE_CODE (newdecl) == TEMPLATE_DECL) { tree old_result = DECL_TEMPLATE_RESULT (olddecl); tree new_result = DECL_TEMPLATE_RESULT (newdecl); TREE_TYPE (olddecl) = TREE_TYPE (old_result); /* The new decl should not already have gathered any specializations. */ gcc_assert (!DECL_TEMPLATE_SPECIALIZATIONS (newdecl)); DECL_ATTRIBUTES (old_result) = (*targetm.merge_decl_attributes) (old_result, new_result); if (DECL_FUNCTION_TEMPLATE_P (newdecl)) { if (DECL_SOURCE_LOCATION (newdecl) != DECL_SOURCE_LOCATION (olddecl)) { /* Per C++11 8.3.6/4, default arguments cannot be added in later declarations of a function template. */ check_redeclaration_no_default_args (newdecl); /* C++17 11.3.6/4: "If a friend declaration specifies a default argument expression, that declaration... shall be the only declaration of the function or function template in the translation unit." 
*/ check_no_redeclaration_friend_default_args (old_result, new_result); } if (!DECL_UNIQUE_FRIEND_P (old_result)) DECL_UNIQUE_FRIEND_P (new_result) = false; check_default_args (newdecl); if (GNU_INLINE_P (old_result) != GNU_INLINE_P (new_result) && DECL_INITIAL (new_result)) { if (DECL_INITIAL (old_result)) DECL_UNINLINABLE (old_result) = 1; else DECL_UNINLINABLE (old_result) = DECL_UNINLINABLE (new_result); DECL_EXTERNAL (old_result) = DECL_EXTERNAL (new_result); DECL_NOT_REALLY_EXTERN (old_result) = DECL_NOT_REALLY_EXTERN (new_result); DECL_INTERFACE_KNOWN (old_result) = DECL_INTERFACE_KNOWN (new_result); DECL_DECLARED_INLINE_P (old_result) = DECL_DECLARED_INLINE_P (new_result); DECL_DISREGARD_INLINE_LIMITS (old_result) |= DECL_DISREGARD_INLINE_LIMITS (new_result); } else { DECL_DECLARED_INLINE_P (old_result) |= DECL_DECLARED_INLINE_P (new_result); DECL_DISREGARD_INLINE_LIMITS (old_result) |= DECL_DISREGARD_INLINE_LIMITS (new_result); check_redeclaration_exception_specification (newdecl, olddecl); merge_attribute_bits (new_result, old_result); } } /* If the new declaration is a definition, update the file and line information on the declaration, and also make the old declaration the same definition. */ if (DECL_INITIAL (new_result) != NULL_TREE) { DECL_SOURCE_LOCATION (olddecl) = DECL_SOURCE_LOCATION (old_result) = DECL_SOURCE_LOCATION (newdecl); DECL_INITIAL (old_result) = DECL_INITIAL (new_result); if (DECL_FUNCTION_TEMPLATE_P (newdecl)) { tree parm; DECL_ARGUMENTS (old_result) = DECL_ARGUMENTS (new_result); for (parm = DECL_ARGUMENTS (old_result); parm; parm = DECL_CHAIN (parm)) DECL_CONTEXT (parm) = old_result; } } return olddecl; } if (types_match) { if (TREE_CODE (newdecl) == FUNCTION_DECL) check_redeclaration_exception_specification (newdecl, olddecl); /* Automatically handles default parameters. */ tree oldtype = TREE_TYPE (olddecl); tree newtype; /* For typedefs use the old type, as the new type's DECL_NAME points at newdecl, which will be ggc_freed. 
*/ if (TREE_CODE (newdecl) == TYPE_DECL) { /* But NEWTYPE might have an attribute, honor that. */ tree tem = TREE_TYPE (newdecl); newtype = oldtype; if (TYPE_USER_ALIGN (tem)) { if (TYPE_ALIGN (tem) > TYPE_ALIGN (newtype)) SET_TYPE_ALIGN (newtype, TYPE_ALIGN (tem)); TYPE_USER_ALIGN (newtype) = true; } /* And remove the new type from the variants list. */ if (TYPE_NAME (TREE_TYPE (newdecl)) == newdecl) { tree remove = TREE_TYPE (newdecl); if (TYPE_MAIN_VARIANT (remove) == remove) { gcc_assert (TYPE_NEXT_VARIANT (remove) == NULL_TREE); /* If remove is the main variant, no need to remove that from the list. One of the DECL_ORIGINAL_TYPE variants, e.g. created for aligned attribute, might still refer to the newdecl TYPE_DECL though, so remove that one in that case. */ if (tree orig = DECL_ORIGINAL_TYPE (newdecl)) if (orig != remove) for (tree t = TYPE_MAIN_VARIANT (orig); t; t = TYPE_MAIN_VARIANT (t)) if (TYPE_NAME (TYPE_NEXT_VARIANT (t)) == newdecl) { TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (TYPE_NEXT_VARIANT (t)); break; } } else for (tree t = TYPE_MAIN_VARIANT (remove); ; t = TYPE_NEXT_VARIANT (t)) if (TYPE_NEXT_VARIANT (t) == remove) { TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (remove); break; } } } else if (merge_attr) newtype = merge_types (TREE_TYPE (newdecl), TREE_TYPE (olddecl)); else newtype = TREE_TYPE (newdecl); if (VAR_P (newdecl)) { DECL_THIS_EXTERN (newdecl) |= DECL_THIS_EXTERN (olddecl); /* For already initialized vars, TREE_READONLY could have been cleared in cp_finish_decl, because the var needs runtime initialization or destruction. Make sure not to set TREE_READONLY on it again. 
*/ if (DECL_INITIALIZED_P (olddecl) && !DECL_EXTERNAL (olddecl) && !TREE_READONLY (olddecl)) TREE_READONLY (newdecl) = 0; DECL_INITIALIZED_P (newdecl) |= DECL_INITIALIZED_P (olddecl); DECL_NONTRIVIALLY_INITIALIZED_P (newdecl) |= DECL_NONTRIVIALLY_INITIALIZED_P (olddecl); if (DECL_DEPENDENT_INIT_P (olddecl)) SET_DECL_DEPENDENT_INIT_P (newdecl, true); DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (newdecl) |= DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (olddecl); DECL_DECLARED_CONSTEXPR_P (newdecl) |= DECL_DECLARED_CONSTEXPR_P (olddecl); DECL_DECLARED_CONSTINIT_P (newdecl) |= DECL_DECLARED_CONSTINIT_P (olddecl); /* Merge the threadprivate attribute from OLDDECL into NEWDECL. */ if (DECL_LANG_SPECIFIC (olddecl) && CP_DECL_THREADPRIVATE_P (olddecl)) { /* Allocate a LANG_SPECIFIC structure for NEWDECL, if needed. */ retrofit_lang_decl (newdecl); CP_DECL_THREADPRIVATE_P (newdecl) = 1; } } /* An explicit specialization of a function template or of a member function of a class template can be declared transaction_safe independently of whether the corresponding template entity is declared transaction_safe. */ if (flag_tm && TREE_CODE (newdecl) == FUNCTION_DECL && DECL_TEMPLATE_INSTANTIATION (olddecl) && DECL_TEMPLATE_SPECIALIZATION (newdecl) && tx_safe_fn_type_p (newtype) && !tx_safe_fn_type_p (TREE_TYPE (newdecl))) newtype = tx_unsafe_fn_variant (newtype); TREE_TYPE (newdecl) = TREE_TYPE (olddecl) = newtype; if (TREE_CODE (newdecl) == FUNCTION_DECL) check_default_args (newdecl); /* Lay the type out, unless already done. */ if (! same_type_p (newtype, oldtype) && TREE_TYPE (newdecl) != error_mark_node && !(processing_template_decl && uses_template_parms (newdecl))) layout_type (TREE_TYPE (newdecl)); if ((VAR_P (newdecl) || TREE_CODE (newdecl) == PARM_DECL || TREE_CODE (newdecl) == RESULT_DECL || TREE_CODE (newdecl) == FIELD_DECL || TREE_CODE (newdecl) == TYPE_DECL) && !(processing_template_decl && uses_template_parms (newdecl))) layout_decl (newdecl, 0); /* Merge deprecatedness. 
*/ if (TREE_DEPRECATED (newdecl)) TREE_DEPRECATED (olddecl) = 1; /* Preserve function specific target and optimization options */ if (TREE_CODE (newdecl) == FUNCTION_DECL) { if (DECL_FUNCTION_SPECIFIC_TARGET (olddecl) && !DECL_FUNCTION_SPECIFIC_TARGET (newdecl)) DECL_FUNCTION_SPECIFIC_TARGET (newdecl) = DECL_FUNCTION_SPECIFIC_TARGET (olddecl); if (DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl) && !DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl)) DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl) = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl); if (!DECL_UNIQUE_FRIEND_P (olddecl)) DECL_UNIQUE_FRIEND_P (newdecl) = false; } else { /* Merge the const type qualifier. */ if (TREE_READONLY (newdecl)) TREE_READONLY (olddecl) = 1; /* Merge the volatile type qualifier. */ if (TREE_THIS_VOLATILE (newdecl)) TREE_THIS_VOLATILE (olddecl) = 1; } /* Merge the initialization information. */ if (DECL_INITIAL (newdecl) == NULL_TREE && DECL_INITIAL (olddecl) != NULL_TREE) { DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl); DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl); if (TREE_CODE (newdecl) == FUNCTION_DECL) { DECL_SAVED_TREE (newdecl) = DECL_SAVED_TREE (olddecl); DECL_STRUCT_FUNCTION (newdecl) = DECL_STRUCT_FUNCTION (olddecl); } } if (TREE_CODE (newdecl) == FUNCTION_DECL) { DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (newdecl) |= DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (olddecl); DECL_NO_LIMIT_STACK (newdecl) |= DECL_NO_LIMIT_STACK (olddecl); if (DECL_IS_OPERATOR_NEW_P (olddecl)) DECL_SET_IS_OPERATOR_NEW (newdecl, true); DECL_LOOPING_CONST_OR_PURE_P (newdecl) |= DECL_LOOPING_CONST_OR_PURE_P (olddecl); DECL_IS_REPLACEABLE_OPERATOR (newdecl) |= DECL_IS_REPLACEABLE_OPERATOR (olddecl); if (merge_attr) merge_attribute_bits (newdecl, olddecl); else { /* Merge the noreturn bit. 
*/ TREE_THIS_VOLATILE (olddecl) = TREE_THIS_VOLATILE (newdecl); TREE_READONLY (olddecl) = TREE_READONLY (newdecl); TREE_NOTHROW (olddecl) = TREE_NOTHROW (newdecl); DECL_IS_MALLOC (olddecl) = DECL_IS_MALLOC (newdecl); DECL_PURE_P (olddecl) = DECL_PURE_P (newdecl); } /* Keep the old RTL. */ COPY_DECL_RTL (olddecl, newdecl); } else if (VAR_P (newdecl) && (DECL_SIZE (olddecl) || !DECL_SIZE (newdecl))) { /* Keep the old RTL. We cannot keep the old RTL if the old declaration was for an incomplete object and the new declaration is not since many attributes of the RTL will change. */ COPY_DECL_RTL (olddecl, newdecl); } } /* If cannot merge, then use the new type and qualifiers, and don't preserve the old rtl. */ else { /* Clean out any memory we had of the old declaration. */ tree oldstatic = value_member (olddecl, static_aggregates); if (oldstatic) TREE_VALUE (oldstatic) = error_mark_node; TREE_TYPE (olddecl) = TREE_TYPE (newdecl); TREE_READONLY (olddecl) = TREE_READONLY (newdecl); TREE_THIS_VOLATILE (olddecl) = TREE_THIS_VOLATILE (newdecl); TREE_NOTHROW (olddecl) = TREE_NOTHROW (newdecl); TREE_SIDE_EFFECTS (olddecl) = TREE_SIDE_EFFECTS (newdecl); } /* Merge the storage class information. */ merge_weak (newdecl, olddecl); DECL_DEFER_OUTPUT (newdecl) |= DECL_DEFER_OUTPUT (olddecl); TREE_PUBLIC (newdecl) = TREE_PUBLIC (olddecl); TREE_STATIC (olddecl) = TREE_STATIC (newdecl) |= TREE_STATIC (olddecl); if (! DECL_EXTERNAL (olddecl)) DECL_EXTERNAL (newdecl) = 0; if (! 
DECL_COMDAT (olddecl)) DECL_COMDAT (newdecl) = 0; new_template_info = NULL_TREE; if (DECL_LANG_SPECIFIC (newdecl) && DECL_LANG_SPECIFIC (olddecl)) { bool new_redefines_gnu_inline = false; if (new_defines_function && ((DECL_INTERFACE_KNOWN (olddecl) && TREE_CODE (olddecl) == FUNCTION_DECL) || (TREE_CODE (olddecl) == TEMPLATE_DECL && (TREE_CODE (DECL_TEMPLATE_RESULT (olddecl)) == FUNCTION_DECL)))) new_redefines_gnu_inline = GNU_INLINE_P (STRIP_TEMPLATE (olddecl)); if (!new_redefines_gnu_inline) { DECL_INTERFACE_KNOWN (newdecl) |= DECL_INTERFACE_KNOWN (olddecl); DECL_NOT_REALLY_EXTERN (newdecl) |= DECL_NOT_REALLY_EXTERN (olddecl); DECL_COMDAT (newdecl) |= DECL_COMDAT (olddecl); } DECL_TEMPLATE_INSTANTIATED (newdecl) |= DECL_TEMPLATE_INSTANTIATED (olddecl); DECL_ODR_USED (newdecl) |= DECL_ODR_USED (olddecl); /* If the OLDDECL is an instantiation and/or specialization, then the NEWDECL must be too. But, it may not yet be marked as such if the caller has created NEWDECL, but has not yet figured out that it is a redeclaration. */ if (!DECL_USE_TEMPLATE (newdecl)) DECL_USE_TEMPLATE (newdecl) = DECL_USE_TEMPLATE (olddecl); /* Don't really know how much of the language-specific values we should copy from old to new. */ DECL_IN_AGGR_P (newdecl) = DECL_IN_AGGR_P (olddecl); DECL_INITIALIZED_IN_CLASS_P (newdecl) |= DECL_INITIALIZED_IN_CLASS_P (olddecl); if (LANG_DECL_HAS_MIN (newdecl)) { DECL_ACCESS (newdecl) = DECL_ACCESS (olddecl); if (DECL_TEMPLATE_INFO (newdecl)) { new_template_info = DECL_TEMPLATE_INFO (newdecl); if (DECL_TEMPLATE_INSTANTIATION (olddecl) && DECL_TEMPLATE_SPECIALIZATION (newdecl)) /* Remember the presence of explicit specialization args. */ TINFO_USED_TEMPLATE_ID (DECL_TEMPLATE_INFO (olddecl)) = TINFO_USED_TEMPLATE_ID (new_template_info); } DECL_TEMPLATE_INFO (newdecl) = DECL_TEMPLATE_INFO (olddecl); } if (DECL_DECLARES_FUNCTION_P (newdecl)) { /* Only functions have these fields. 
*/ DECL_NONCONVERTING_P (newdecl) = DECL_NONCONVERTING_P (olddecl); DECL_BEFRIENDING_CLASSES (newdecl) = chainon (DECL_BEFRIENDING_CLASSES (newdecl), DECL_BEFRIENDING_CLASSES (olddecl)); /* DECL_THUNKS is only valid for virtual functions, otherwise it is a DECL_FRIEND_CONTEXT. */ if (DECL_VIRTUAL_P (newdecl)) SET_DECL_THUNKS (newdecl, DECL_THUNKS (olddecl)); } else if (VAR_P (newdecl)) { /* Only variables have this field. */ if (VAR_HAD_UNKNOWN_BOUND (olddecl)) SET_VAR_HAD_UNKNOWN_BOUND (newdecl); } } if (TREE_CODE (newdecl) == FUNCTION_DECL) { tree parm; /* Merge parameter attributes. */ tree oldarg, newarg; for (oldarg = DECL_ARGUMENTS(olddecl), newarg = DECL_ARGUMENTS(newdecl); oldarg && newarg; oldarg = DECL_CHAIN(oldarg), newarg = DECL_CHAIN(newarg)) { DECL_ATTRIBUTES (newarg) = (*targetm.merge_decl_attributes) (oldarg, newarg); DECL_ATTRIBUTES (oldarg) = DECL_ATTRIBUTES (newarg); } if (DECL_TEMPLATE_INSTANTIATION (olddecl) && !DECL_TEMPLATE_INSTANTIATION (newdecl)) { /* If newdecl is not a specialization, then it is not a template-related function at all. And that means that we should have exited above, returning 0. */ gcc_assert (DECL_TEMPLATE_SPECIALIZATION (newdecl)); if (DECL_ODR_USED (olddecl)) /* From [temp.expl.spec]: If a template, a member template or the member of a class template is explicitly specialized then that specialization shall be declared before the first use of that specialization that would cause an implicit instantiation to take place, in every translation unit in which such a use occurs. */ error ("explicit specialization of %qD after first use", olddecl); SET_DECL_TEMPLATE_SPECIALIZATION (olddecl); DECL_COMDAT (newdecl) = (TREE_PUBLIC (newdecl) && DECL_DECLARED_INLINE_P (newdecl)); /* Don't propagate visibility from the template to the specialization here. We'll do that in determine_visibility if appropriate. 
*/ DECL_VISIBILITY_SPECIFIED (olddecl) = 0; /* [temp.expl.spec/14] We don't inline explicit specialization just because the primary template says so. */ gcc_assert (!merge_attr); DECL_DECLARED_INLINE_P (olddecl) = DECL_DECLARED_INLINE_P (newdecl); DECL_DISREGARD_INLINE_LIMITS (olddecl) = DECL_DISREGARD_INLINE_LIMITS (newdecl); DECL_UNINLINABLE (olddecl) = DECL_UNINLINABLE (newdecl); } else if (new_defines_function && DECL_INITIAL (olddecl)) { /* Never inline re-defined extern inline functions. FIXME: this could be better handled by keeping both function as separate declarations. */ DECL_UNINLINABLE (newdecl) = 1; } else { if (DECL_PENDING_INLINE_P (olddecl)) { DECL_PENDING_INLINE_P (newdecl) = 1; DECL_PENDING_INLINE_INFO (newdecl) = DECL_PENDING_INLINE_INFO (olddecl); } else if (DECL_PENDING_INLINE_P (newdecl)) ; else if (DECL_SAVED_AUTO_RETURN_TYPE (newdecl) == NULL) DECL_SAVED_AUTO_RETURN_TYPE (newdecl) = DECL_SAVED_AUTO_RETURN_TYPE (olddecl); DECL_DECLARED_INLINE_P (newdecl) |= DECL_DECLARED_INLINE_P (olddecl); DECL_UNINLINABLE (newdecl) = DECL_UNINLINABLE (olddecl) = (DECL_UNINLINABLE (newdecl) || DECL_UNINLINABLE (olddecl)); DECL_DISREGARD_INLINE_LIMITS (newdecl) = DECL_DISREGARD_INLINE_LIMITS (olddecl) = (DECL_DISREGARD_INLINE_LIMITS (newdecl) || DECL_DISREGARD_INLINE_LIMITS (olddecl)); } /* Preserve abstractness on cloned [cd]tors. */ DECL_ABSTRACT_P (newdecl) = DECL_ABSTRACT_P (olddecl); /* Update newdecl's parms to point at olddecl. */ for (parm = DECL_ARGUMENTS (newdecl); parm; parm = DECL_CHAIN (parm)) DECL_CONTEXT (parm) = olddecl; if (! types_match) { SET_DECL_LANGUAGE (olddecl, DECL_LANGUAGE (newdecl)); COPY_DECL_ASSEMBLER_NAME (newdecl, olddecl); COPY_DECL_RTL (newdecl, olddecl); } if (! types_match || new_defines_function) { /* These need to be copied so that the names are available. Note that if the types do match, we'll preserve inline info and other bits, but if not, we won't. 
*/ DECL_ARGUMENTS (olddecl) = DECL_ARGUMENTS (newdecl); DECL_RESULT (olddecl) = DECL_RESULT (newdecl); } /* If redeclaring a builtin function, it stays built in if newdecl is a gnu_inline definition, or if newdecl is just a declaration. */ if (fndecl_built_in_p (olddecl) && (new_defines_function ? GNU_INLINE_P (newdecl) : types_match)) { copy_decl_built_in_function (newdecl, olddecl); /* If we're keeping the built-in definition, keep the rtl, regardless of declaration matches. */ COPY_DECL_RTL (olddecl, newdecl); if (DECL_BUILT_IN_CLASS (newdecl) == BUILT_IN_NORMAL) { enum built_in_function fncode = DECL_FUNCTION_CODE (newdecl); switch (fncode) { /* If a compatible prototype of these builtin functions is seen, assume the runtime implements it with the expected semantics. */ case BUILT_IN_STPCPY: if (builtin_decl_explicit_p (fncode)) set_builtin_decl_implicit_p (fncode, true); break; default: if (builtin_decl_explicit_p (fncode)) set_builtin_decl_declared_p (fncode, true); break; } copy_attributes_to_builtin (newdecl); } } if (new_defines_function) /* If defining a function declared with other language linkage, use the previously declared language linkage. */ SET_DECL_LANGUAGE (newdecl, DECL_LANGUAGE (olddecl)); else if (types_match) { DECL_RESULT (newdecl) = DECL_RESULT (olddecl); /* Don't clear out the arguments if we're just redeclaring a function. */ if (DECL_ARGUMENTS (olddecl)) DECL_ARGUMENTS (newdecl) = DECL_ARGUMENTS (olddecl); } } else if (TREE_CODE (newdecl) == NAMESPACE_DECL) NAMESPACE_LEVEL (newdecl) = NAMESPACE_LEVEL (olddecl); /* Now preserve various other info from the definition. */ TREE_ADDRESSABLE (newdecl) = TREE_ADDRESSABLE (olddecl); TREE_ASM_WRITTEN (newdecl) = TREE_ASM_WRITTEN (olddecl); DECL_COMMON (newdecl) = DECL_COMMON (olddecl); COPY_DECL_ASSEMBLER_NAME (olddecl, newdecl); /* Warn about conflicting visibility specifications. 
*/ if (DECL_VISIBILITY_SPECIFIED (olddecl) && DECL_VISIBILITY_SPECIFIED (newdecl) && DECL_VISIBILITY (newdecl) != DECL_VISIBILITY (olddecl)) { auto_diagnostic_group d; if (warning_at (newdecl_loc, OPT_Wattributes, "%qD: visibility attribute ignored because it " "conflicts with previous declaration", newdecl)) inform (olddecl_loc, "previous declaration of %qD", olddecl); } /* Choose the declaration which specified visibility. */ if (DECL_VISIBILITY_SPECIFIED (olddecl)) { DECL_VISIBILITY (newdecl) = DECL_VISIBILITY (olddecl); DECL_VISIBILITY_SPECIFIED (newdecl) = 1; } /* Init priority used to be merged from newdecl to olddecl by the memcpy, so keep this behavior. */ if (VAR_P (newdecl) && DECL_HAS_INIT_PRIORITY_P (newdecl)) { SET_DECL_INIT_PRIORITY (olddecl, DECL_INIT_PRIORITY (newdecl)); DECL_HAS_INIT_PRIORITY_P (olddecl) = 1; } /* Likewise for DECL_ALIGN, DECL_USER_ALIGN and DECL_PACKED. */ if (DECL_ALIGN (olddecl) > DECL_ALIGN (newdecl)) { SET_DECL_ALIGN (newdecl, DECL_ALIGN (olddecl)); DECL_USER_ALIGN (newdecl) |= DECL_USER_ALIGN (olddecl); } DECL_USER_ALIGN (olddecl) = DECL_USER_ALIGN (newdecl); if (DECL_WARN_IF_NOT_ALIGN (olddecl) > DECL_WARN_IF_NOT_ALIGN (newdecl)) SET_DECL_WARN_IF_NOT_ALIGN (newdecl, DECL_WARN_IF_NOT_ALIGN (olddecl)); if (TREE_CODE (newdecl) == FIELD_DECL) DECL_PACKED (olddecl) = DECL_PACKED (newdecl); /* The DECL_LANG_SPECIFIC information in OLDDECL will be replaced with that from NEWDECL below. */ if (DECL_LANG_SPECIFIC (olddecl)) { gcc_assert (DECL_LANG_SPECIFIC (olddecl) != DECL_LANG_SPECIFIC (newdecl)); ggc_free (DECL_LANG_SPECIFIC (olddecl)); } /* Merge the USED information. 
*/ if (TREE_USED (olddecl)) TREE_USED (newdecl) = 1; else if (TREE_USED (newdecl)) TREE_USED (olddecl) = 1; if (VAR_P (newdecl)) { if (DECL_READ_P (olddecl)) DECL_READ_P (newdecl) = 1; else if (DECL_READ_P (newdecl)) DECL_READ_P (olddecl) = 1; } if (DECL_PRESERVE_P (olddecl)) DECL_PRESERVE_P (newdecl) = 1; else if (DECL_PRESERVE_P (newdecl)) DECL_PRESERVE_P (olddecl) = 1; /* Merge the DECL_FUNCTION_VERSIONED information. newdecl will be copied to olddecl and deleted. */ if (TREE_CODE (newdecl) == FUNCTION_DECL && DECL_FUNCTION_VERSIONED (olddecl)) { /* Set the flag for newdecl so that it gets copied to olddecl. */ DECL_FUNCTION_VERSIONED (newdecl) = 1; /* newdecl will be purged after copying to olddecl and is no longer a version. */ cgraph_node::delete_function_version_by_decl (newdecl); } if (TREE_CODE (newdecl) == FUNCTION_DECL) { int function_size; struct symtab_node *snode = symtab_node::get (olddecl); function_size = sizeof (struct tree_decl_common); memcpy ((char *) olddecl + sizeof (struct tree_common), (char *) newdecl + sizeof (struct tree_common), function_size - sizeof (struct tree_common)); memcpy ((char *) olddecl + sizeof (struct tree_decl_common), (char *) newdecl + sizeof (struct tree_decl_common), sizeof (struct tree_function_decl) - sizeof (struct tree_decl_common)); /* Preserve symtab node mapping. */ olddecl->decl_with_vis.symtab_node = snode; if (new_template_info) /* If newdecl is a template instantiation, it is possible that the following sequence of events has occurred: o A friend function was declared in a class template. The class template was instantiated. o The instantiation of the friend declaration was recorded on the instantiation list, and is newdecl. o Later, however, instantiate_class_template called pushdecl on the newdecl to perform name injection. But, pushdecl in turn called duplicate_decls when it discovered that another declaration of a global function with the same name already existed. 
o Here, in duplicate_decls, we decided to clobber newdecl. If we're going to do that, we'd better make sure that olddecl, and not newdecl, is on the list of instantiations so that if we try to do the instantiation again we won't get the clobbered declaration. */ reregister_specialization (newdecl, new_template_info, olddecl); } else { size_t size = tree_code_size (TREE_CODE (newdecl)); memcpy ((char *) olddecl + sizeof (struct tree_common), (char *) newdecl + sizeof (struct tree_common), sizeof (struct tree_decl_common) - sizeof (struct tree_common)); switch (TREE_CODE (newdecl)) { case LABEL_DECL: case VAR_DECL: case RESULT_DECL: case PARM_DECL: case FIELD_DECL: case TYPE_DECL: case CONST_DECL: { struct symtab_node *snode = NULL; if (VAR_P (newdecl) && (TREE_STATIC (olddecl) || TREE_PUBLIC (olddecl) || DECL_EXTERNAL (olddecl))) snode = symtab_node::get (olddecl); memcpy ((char *) olddecl + sizeof (struct tree_decl_common), (char *) newdecl + sizeof (struct tree_decl_common), size - sizeof (struct tree_decl_common) + TREE_CODE_LENGTH (TREE_CODE (newdecl)) * sizeof (char *)); if (VAR_P (newdecl)) olddecl->decl_with_vis.symtab_node = snode; } break; default: memcpy ((char *) olddecl + sizeof (struct tree_decl_common), (char *) newdecl + sizeof (struct tree_decl_common), sizeof (struct tree_decl_non_common) - sizeof (struct tree_decl_common) + TREE_CODE_LENGTH (TREE_CODE (newdecl)) * sizeof (char *)); break; } } if (VAR_OR_FUNCTION_DECL_P (newdecl)) { if (DECL_EXTERNAL (olddecl) || TREE_PUBLIC (olddecl) || TREE_STATIC (olddecl)) { /* Merge the section attribute. We want to issue an error if the sections conflict but that must be done later in decl_attributes since we are called before attributes are assigned. 
*/ if (DECL_SECTION_NAME (newdecl) != NULL) set_decl_section_name (olddecl, DECL_SECTION_NAME (newdecl)); if (DECL_ONE_ONLY (newdecl)) { struct symtab_node *oldsym, *newsym; if (TREE_CODE (olddecl) == FUNCTION_DECL) oldsym = cgraph_node::get_create (olddecl); else oldsym = varpool_node::get_create (olddecl); newsym = symtab_node::get (newdecl); oldsym->set_comdat_group (newsym->get_comdat_group ()); } } if (VAR_P (newdecl) && CP_DECL_THREAD_LOCAL_P (newdecl)) { CP_DECL_THREAD_LOCAL_P (olddecl) = true; if (!processing_template_decl) set_decl_tls_model (olddecl, DECL_TLS_MODEL (newdecl)); } } DECL_UID (olddecl) = olddecl_uid; /* NEWDECL contains the merged attribute lists. Update OLDDECL to be the same. */ DECL_ATTRIBUTES (olddecl) = DECL_ATTRIBUTES (newdecl); /* If OLDDECL had its DECL_RTL instantiated, re-invoke make_decl_rtl so that encode_section_info has a chance to look at the new decl flags and attributes. */ if (DECL_RTL_SET_P (olddecl) && (TREE_CODE (olddecl) == FUNCTION_DECL || (VAR_P (olddecl) && TREE_STATIC (olddecl)))) make_decl_rtl (olddecl); /* The NEWDECL will no longer be needed. Because every out-of-class declaration of a member results in a call to duplicate_decls, freeing these nodes represents in a significant savings. Before releasing the node, be sore to remove function from symbol table that might have been inserted there to record comdat group. Be sure to however do not free DECL_STRUCT_FUNCTION because this structure is shared in between newdecl and oldecl. */ if (TREE_CODE (newdecl) == FUNCTION_DECL) DECL_STRUCT_FUNCTION (newdecl) = NULL; if (VAR_OR_FUNCTION_DECL_P (newdecl)) { struct symtab_node *snode = symtab_node::get (newdecl); if (snode) snode->remove (); } /* Remove the associated constraints for newdecl, if any, before reclaiming memory. 
*/
  /* NEWDECL has been fully merged into OLDDECL; drop its associated
     constraints (if concepts are enabled) and release its memory.  */
  if (flag_concepts)
    remove_constraints (newdecl);

  ggc_free (newdecl);

  return olddecl;
}

/* Return zero if the declaration NEWDECL is valid
   when the declaration OLDDECL (assumed to be for the same name)
   has already been seen.
   Otherwise return an error message format string with a %s
   where the identifier should go.  */

static const char *
redeclaration_error_message (tree newdecl, tree olddecl)
{
  if (TREE_CODE (newdecl) == TYPE_DECL)
    {
      /* Because C++ can put things into name space for free,
	 constructs like "typedef struct foo { ... } foo"
	 would look like an erroneous redeclaration.  */
      if (same_type_p (TREE_TYPE (newdecl), TREE_TYPE (olddecl)))
	return NULL;
      else
	return G_("redefinition of %q#D");
    }
  else if (TREE_CODE (newdecl) == FUNCTION_DECL)
    {
      /* If this is a pure function, its olddecl will actually be
	 the original initialization to `0' (which we force to call
	 abort()).  Don't complain about redefinition in this case.  */
      if (DECL_LANG_SPECIFIC (olddecl) && DECL_PURE_VIRTUAL_P (olddecl)
	  && DECL_INITIAL (olddecl) == NULL_TREE)
	return NULL;

      /* If both functions come from different namespaces, this is not
	 a redeclaration - this is a conflict with a used function.  */
      if (DECL_NAMESPACE_SCOPE_P (olddecl)
	  && DECL_CONTEXT (olddecl) != DECL_CONTEXT (newdecl)
	  && ! decls_match (olddecl, newdecl))
	return G_("%qD conflicts with used function");

      /* We'll complain about linkage mismatches in
	 warn_extern_redeclared_static.  */

      /* Defining the same name twice is no good.
*/
      if (decl_defined_p (olddecl)
	  && decl_defined_p (newdecl))
	{
	  if (DECL_NAME (olddecl) == NULL_TREE)
	    return G_("%q#D not declared in class");
	  else if (!GNU_INLINE_P (olddecl)
		   || GNU_INLINE_P (newdecl))
	    return G_("redefinition of %q#D");
	}

      /* A gnu_inline declaration may legitimately follow (or precede) a
	 plain inline one; diagnose only a mismatch between the two.  */
      if (DECL_DECLARED_INLINE_P (olddecl) && DECL_DECLARED_INLINE_P (newdecl))
	{
	  bool olda = GNU_INLINE_P (olddecl);
	  bool newa = GNU_INLINE_P (newdecl);

	  if (olda != newa)
	    {
	      if (newa)
		return G_("%q+D redeclared inline with "
			  "%<gnu_inline%> attribute");
	      else
		return G_("%q+D redeclared inline without "
			  "%<gnu_inline%> attribute");
	    }
	}

      if (deduction_guide_p (olddecl)
	  && deduction_guide_p (newdecl))
	return G_("deduction guide %q+D redeclared");

      /* [class.compare.default]: A definition of a comparison operator as
	 defaulted that appears in a class shall be the first declaration of
	 that function.  */
      special_function_kind sfk = special_function_p (olddecl);
      if (sfk == sfk_comparison && DECL_DEFAULTED_FN (newdecl))
	return G_("comparison operator %q+D defaulted after "
		  "its first declaration");

      check_abi_tag_redeclaration
	(olddecl,
	 lookup_attribute ("abi_tag", DECL_ATTRIBUTES (olddecl)),
	 lookup_attribute ("abi_tag", DECL_ATTRIBUTES (newdecl)));

      return NULL;
    }
  else if (TREE_CODE (newdecl) == TEMPLATE_DECL)
    {
      tree nt, ot;

      if (TREE_CODE (DECL_TEMPLATE_RESULT (newdecl)) == CONCEPT_DECL)
	return G_("redefinition of %q#D");

      /* For non-function templates, delegate to the check on the
	 underlying templated declarations.  */
      if (TREE_CODE (DECL_TEMPLATE_RESULT (newdecl)) != FUNCTION_DECL)
	return redeclaration_error_message (DECL_TEMPLATE_RESULT (newdecl),
					    DECL_TEMPLATE_RESULT (olddecl));

      if (DECL_TEMPLATE_RESULT (newdecl) == DECL_TEMPLATE_RESULT (olddecl))
	return NULL;

      nt = DECL_TEMPLATE_RESULT (newdecl);
      if (DECL_TEMPLATE_INFO (nt))
	nt = DECL_TEMPLATE_RESULT (template_for_substitution (nt));
      ot = DECL_TEMPLATE_RESULT (olddecl);
      if (DECL_TEMPLATE_INFO (ot))
	ot = DECL_TEMPLATE_RESULT (template_for_substitution (ot));
      if (DECL_INITIAL (nt) && DECL_INITIAL (ot)
	  && (!GNU_INLINE_P (ot) || GNU_INLINE_P (nt)))
	return G_("redefinition of %q#D");

      if (DECL_DECLARED_INLINE_P (ot) && DECL_DECLARED_INLINE_P (nt))
	{
	  bool olda = GNU_INLINE_P (ot);
	  bool newa = GNU_INLINE_P (nt);

	  if (olda != newa)
	    {
	      if (newa)
		return G_("%q+D redeclared inline with "
			  "%<gnu_inline%> attribute");
	      else
		return G_("%q+D redeclared inline without "
			  "%<gnu_inline%> attribute");
	    }
	}

      if (deduction_guide_p (olddecl)
	  && deduction_guide_p (newdecl))
	return G_("deduction guide %q+D redeclared");

      /* Core issue #226 (C++11):

	   If a friend function template declaration specifies a
	   default template-argument, that declaration shall be a
	   definition and shall be the only declaration of the
	   function template in the translation unit.  */
      if ((cxx_dialect != cxx98)
	  && TREE_CODE (ot) == FUNCTION_DECL && DECL_UNIQUE_FRIEND_P (ot)
	  && !check_default_tmpl_args (nt, DECL_TEMPLATE_PARMS (newdecl),
				       /*is_primary=*/true,
				       /*is_partial=*/false,
				       /*is_friend_decl=*/2))
	return G_("redeclaration of friend %q#D "
		  "may not have default template arguments");

      return NULL;
    }
  else if (VAR_P (newdecl)
	   && (CP_DECL_THREAD_LOCAL_P (newdecl)
	       != CP_DECL_THREAD_LOCAL_P (olddecl))
	   && (! DECL_LANG_SPECIFIC (olddecl)
	       || ! CP_DECL_THREADPRIVATE_P (olddecl)
	       || CP_DECL_THREAD_LOCAL_P (newdecl)))
    {
      /* Only variables can be thread-local, and all declarations must
	 agree on this property.  */
      if (CP_DECL_THREAD_LOCAL_P (newdecl))
	return G_("thread-local declaration of %q#D follows "
		  "non-thread-local declaration");
      else
	return G_("non-thread-local declaration of %q#D follows "
		  "thread-local declaration");
    }
  else if (toplevel_bindings_p () || DECL_NAMESPACE_SCOPE_P (newdecl))
    {
      /* The objects have been declared at namespace scope.  If either
	 is a member of an anonymous union, then this is an invalid
	 redeclaration.  For example:

	   int i;
	   union { int i; };

	 is invalid.  */
      if ((VAR_P (newdecl) && DECL_ANON_UNION_VAR_P (newdecl))
	  || (VAR_P (olddecl) && DECL_ANON_UNION_VAR_P (olddecl)))
	return G_("redeclaration of %q#D");
      /* If at least one declaration is a reference, there
	 is no conflict.  For example:

	   int i = 3;
	   extern int i;

	 is valid.  */
      if (DECL_EXTERNAL (newdecl) || DECL_EXTERNAL (olddecl))
	return NULL;

      /* Static data member declared outside a class definition
	 if the variable is defined within the class with constexpr
	 specifier is declaration rather than definition (and
	 deprecated).  */
      if (cxx_dialect >= cxx17
	  && VAR_P (olddecl)
	  && DECL_CLASS_SCOPE_P (olddecl)
	  && DECL_DECLARED_CONSTEXPR_P (olddecl)
	  && !DECL_INITIAL (newdecl))
	{
	  DECL_EXTERNAL (newdecl) = 1;
	  /* For now, only warn with explicit -Wdeprecated.  */
	  if (global_options_set.x_warn_deprecated)
	    {
	      auto_diagnostic_group d;
	      if (warning_at (DECL_SOURCE_LOCATION (newdecl), OPT_Wdeprecated,
			      "redundant redeclaration of %<constexpr%> "
			      "static data member %qD", newdecl))
		inform (DECL_SOURCE_LOCATION (olddecl),
			"previous declaration of %qD", olddecl);
	    }
	  return NULL;
	}

      /* Reject two definitions.  */
      return G_("redefinition of %q#D");
    }
  else
    {
      /* Objects declared with block scope:  */
      /* Reject two definitions, and reject a definition
	 together with an external reference.  */
      if (!(DECL_EXTERNAL (newdecl) && DECL_EXTERNAL (olddecl)))
	return G_("redeclaration of %q#D");
      return NULL;
    }
}

/* Hash and equality functions for the named_label table.  */

hashval_t
named_label_hash::hash (const value_type entry)
{
  return IDENTIFIER_HASH_VALUE (entry->name);
}

bool
named_label_hash::equal (const value_type entry, compare_type name)
{
  return name == entry->name;
}

/* Look for a label named ID in the current function.  If one cannot
   be found, create one.  Return the named_label_entry, or NULL on
   failure.  */

static named_label_entry *
lookup_label_1 (tree id, bool making_local_p)
{
  /* You can't use labels at global scope.
*/
  if (current_function_decl == NULL_TREE)
    {
      error ("label %qE referenced outside of any function", id);
      return NULL;
    }

  /* Lazily create the per-function named-label hash table.  */
  if (!named_labels)
    named_labels = hash_table<named_label_hash>::create_ggc (13);

  hashval_t hash = IDENTIFIER_HASH_VALUE (id);
  named_label_entry **slot
    = named_labels->find_slot_with_hash (id, hash, INSERT);
  named_label_entry *old = *slot;

  if (old && old->label_decl)
    {
      if (!making_local_p)
	return old;

      /* A local label may shadow an outer one, but not one in the
	 same binding level.  */
      if (old->binding_level == current_binding_level)
	{
	  error ("local label %qE conflicts with existing label", id);
	  inform (DECL_SOURCE_LOCATION (old->label_decl), "previous label");
	  return NULL;
	}
    }

  /* We are making a new decl, create or reuse the named_label_entry */
  named_label_entry *ent = NULL;
  if (old && !old->label_decl)
    ent = old;
  else
    {
      ent = ggc_cleared_alloc<named_label_entry> ();
      ent->name = id;
      ent->outer = old;
      *slot = ent;
    }

  /* Now create the LABEL_DECL.  */
  tree decl = build_decl (input_location, LABEL_DECL, id, void_type_node);

  DECL_CONTEXT (decl) = current_function_decl;
  SET_DECL_MODE (decl, VOIDmode);
  if (making_local_p)
    {
      /* Local labels are chained onto the current binding level's
	 names so they go out of scope with it.  */
      C_DECLARED_LABEL_FLAG (decl) = true;
      DECL_CHAIN (decl) = current_binding_level->names;
      current_binding_level->names = decl;
    }

  ent->label_decl = decl;

  return ent;
}

/* Wrapper for lookup_label_1.  */

tree
lookup_label (tree id)
{
  bool subtime = timevar_cond_start (TV_NAME_LOOKUP);
  named_label_entry *ent = lookup_label_1 (id, false);
  timevar_cond_stop (TV_NAME_LOOKUP, subtime);
  return ent ? ent->label_decl : NULL_TREE;
}

/* Wrapper for lookup_label_1 that declares a GNU local label.  */

tree
declare_local_label (tree id)
{
  bool subtime = timevar_cond_start (TV_NAME_LOOKUP);
  named_label_entry *ent = lookup_label_1 (id, true);
  timevar_cond_stop (TV_NAME_LOOKUP, subtime);
  return ent ? ent->label_decl : NULL_TREE;
}

/* Returns nonzero if it is ill-formed to jump past the declaration of
   DECL.  Returns 2 if it's also a real problem.
*/

static int
decl_jump_unsafe (tree decl)
{
  /* [stmt.dcl]/3: A program that jumps from a point where a local variable
     with automatic storage duration is not in scope to a point where it is
     in scope is ill-formed unless the variable has scalar type, class type
     with a trivial default constructor and a trivial destructor, a
     cv-qualified version of one of these types, or an array of one of the
     preceding types and is declared without an initializer (8.5).  */
  tree type = TREE_TYPE (decl);

  if (!VAR_P (decl) || TREE_STATIC (decl)
      || type == error_mark_node)
    return 0;

  if (DECL_NONTRIVIALLY_INITIALIZED_P (decl)
      || variably_modified_type_p (type, NULL_TREE))
    return 2;

  if (TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type))
    return 1;

  return 0;
}

/* A subroutine of check_previous_goto_1 and check_goto to identify a branch
   to the user.  */

static bool
identify_goto (tree decl, location_t loc, const location_t *locus,
	       diagnostic_t diag_kind)
{
  bool complained
    = emit_diagnostic (diag_kind, loc, 0,
		       decl ? N_("jump to label %qD")
		       : N_("jump to case label"), decl);
  if (complained && locus)
    inform (*locus, "  from here");
  return complained;
}

/* Check that a single previously seen jump to a newly defined label
   is OK.  DECL is the LABEL_DECL or 0; LEVEL is the binding_level for
   the jump context; NAMES are the names in scope in LEVEL at the jump
   context; LOCUS is the source position of the jump or 0.  Returns
   true if all is well.  */

static bool
check_previous_goto_1 (tree decl, cp_binding_level* level, tree names,
		       bool exited_omp, const location_t *locus)
{
  cp_binding_level *b;
  bool complained = false;
  int identified = 0;
  bool saw_eh = false, saw_omp = false, saw_tm = false, saw_cxif = false;

  if (exited_omp)
    {
      complained = identify_goto (decl, input_location, locus, DK_ERROR);
      if (complained)
	inform (input_location, "  exits OpenMP structured block");
      saw_omp = true;
      identified = 2;
    }

  /* Walk the binding levels from the jump outward to LEVEL, diagnosing
     any declaration the jump would bypass and any construct (try/catch,
     OpenMP, transaction, constexpr if) it would enter.  */
  for (b = current_binding_level; b ; b = b->level_chain)
    {
      tree new_decls, old_decls = (b == level ? names : NULL_TREE);

      for (new_decls = b->names; new_decls != old_decls;
	   new_decls = (DECL_P (new_decls) ? DECL_CHAIN (new_decls)
			: TREE_CHAIN (new_decls)))
	{
	  int problem = decl_jump_unsafe (new_decls);
	  if (! problem)
	    continue;

	  if (!identified)
	    {
	      complained = identify_goto (decl, input_location, locus,
					  problem > 1
					  ? DK_ERROR : DK_PERMERROR);
	      identified = 1;
	    }
	  if (complained)
	    {
	      if (problem > 1)
		inform (DECL_SOURCE_LOCATION (new_decls),
			"  crosses initialization of %q#D", new_decls);
	      else
		inform (DECL_SOURCE_LOCATION (new_decls),
			"  enters scope of %q#D, which has "
			"non-trivial destructor", new_decls);
	    }
	}

      if (b == level)
	break;

      const char *inf = NULL;
      location_t loc = input_location;
      switch (b->kind)
	{
	case sk_try:
	  if (!saw_eh)
	    inf = G_("  enters %<try%> block");
	  saw_eh = true;
	  break;

	case sk_catch:
	  if (!saw_eh)
	    inf = G_("  enters %<catch%> block");
	  saw_eh = true;
	  break;

	case sk_omp:
	  if (!saw_omp)
	    inf = G_("  enters OpenMP structured block");
	  saw_omp = true;
	  break;

	case sk_transaction:
	  if (!saw_tm)
	    inf = G_("  enters synchronized or atomic statement");
	  saw_tm = true;
	  break;

	case sk_block:
	  if (!saw_cxif && level_for_constexpr_if (b->level_chain))
	    {
	      inf = G_("  enters %<constexpr if%> statement");
	      loc = EXPR_LOCATION (b->level_chain->this_entity);
	      saw_cxif = true;
	    }
	  break;

	default:
	  break;
	}

      if (inf)
	{
	  if (identified < 2)
	    complained = identify_goto (decl, input_location, locus, DK_ERROR);
	  identified = 2;
	  if (complained)
	    inform (loc, inf);
	}
    }

  return !identified;
}

/* Check a previously recorded use of a not-yet-defined label DECL;
   USE captures the binding context of the earlier goto.  */

static void
check_previous_goto (tree decl, struct named_label_use_entry *use)
{
  check_previous_goto_1 (decl, use->binding_level,
			 use->names_in_scope, use->in_omp_scope,
			 &use->o_goto_locus);
}

/* Check that the implied jump of a switch statement into LEVEL is OK.  */

static bool
check_switch_goto (cp_binding_level* level)
{
  return check_previous_goto_1 (NULL_TREE, level, level->names, false, NULL);
}

/* Check that a new jump to a label DECL is OK.  Called by
   finish_goto_stmt.  */

void
check_goto (tree decl)
{
  /* We can't know where a computed goto is jumping.
     So we assume that it's OK.
*/
  if (TREE_CODE (decl) != LABEL_DECL)
    return;

  /* We didn't record any information about this label when we created it,
     and there's not much point since it's trivial to analyze as a return.  */
  if (decl == cdtor_label)
    return;

  hashval_t hash = IDENTIFIER_HASH_VALUE (DECL_NAME (decl));
  named_label_entry **slot
    = named_labels->find_slot_with_hash (DECL_NAME (decl), hash, NO_INSERT);
  named_label_entry *ent = *slot;

  /* If the label hasn't been defined yet, defer checking.  */
  if (! DECL_INITIAL (decl))
    {
      /* Don't bother creating another use if the last goto had the
	 same data, and will therefore create the same set of errors.  */
      if (ent->uses
	  && ent->uses->names_in_scope == current_binding_level->names)
	return;

      named_label_use_entry *new_use
	= ggc_alloc<named_label_use_entry> ();
      new_use->binding_level = current_binding_level;
      new_use->names_in_scope = current_binding_level->names;
      new_use->o_goto_locus = input_location;
      new_use->in_omp_scope = false;

      new_use->next = ent->uses;
      ent->uses = new_use;
      return;
    }

  bool saw_catch = false, complained = false;
  int identified = 0;
  tree bad;
  unsigned ix;

  if (ent->in_try_scope || ent->in_catch_scope || ent->in_transaction_scope
      || ent->in_constexpr_if
      || ent->in_omp_scope || !vec_safe_is_empty (ent->bad_decls))
    {
      diagnostic_t diag_kind = DK_PERMERROR;
      if (ent->in_try_scope || ent->in_catch_scope || ent->in_constexpr_if
	  || ent->in_transaction_scope || ent->in_omp_scope)
	diag_kind = DK_ERROR;
      complained = identify_goto (decl, DECL_SOURCE_LOCATION (decl),
				  &input_location, diag_kind);
      identified = 1 + (diag_kind == DK_ERROR);
    }

  /* Report each declaration recorded as bypassed by this jump.  */
  FOR_EACH_VEC_SAFE_ELT (ent->bad_decls, ix, bad)
    {
      int u = decl_jump_unsafe (bad);

      if (u > 1 && DECL_ARTIFICIAL (bad))
	{
	  /* Can't skip init of __exception_info.  */
	  if (identified == 1)
	    {
	      complained = identify_goto (decl, DECL_SOURCE_LOCATION (decl),
					  &input_location, DK_ERROR);
	      identified = 2;
	    }
	  if (complained)
	    inform (DECL_SOURCE_LOCATION (bad), "  enters %<catch%> block");
	  saw_catch = true;
	}
      else if (complained)
	{
	  if (u > 1)
	    inform (DECL_SOURCE_LOCATION (bad),
		    "  skips initialization of %q#D", bad);
	  else
	    inform (DECL_SOURCE_LOCATION (bad),
		    "  enters scope of %q#D which has "
		    "non-trivial destructor", bad);
	}
    }

  if (complained)
    {
      if (ent->in_try_scope)
	inform (input_location, "  enters %<try%> block");
      else if (ent->in_catch_scope && !saw_catch)
	inform (input_location, "  enters %<catch%> block");
      else if (ent->in_transaction_scope)
	inform (input_location, "  enters synchronized or atomic statement");
      else if (ent->in_constexpr_if)
	inform (input_location, "  enters %<constexpr if%> statement");
    }

  if (ent->in_omp_scope)
    {
      if (complained)
	inform (input_location, "  enters OpenMP structured block");
    }
  else if (flag_openmp)
    /* The label itself is not inside an OpenMP region, but the goto may
       still exit one; walk the binding levels to find out.  */
    for (cp_binding_level *b = current_binding_level; b ; b = b->level_chain)
      {
	if (b == ent->binding_level)
	  break;
	if (b->kind == sk_omp)
	  {
	    if (identified < 2)
	      {
		complained = identify_goto (decl,
					    DECL_SOURCE_LOCATION (decl),
					    &input_location, DK_ERROR);
		identified = 2;
	      }
	    if (complained)
	      inform (input_location, "  exits OpenMP structured block");
	    break;
	  }
      }
}

/* Check that a return is ok wrt OpenMP structured blocks.
   Called by finish_return_stmt.  Returns true if all is well.  */

bool
check_omp_return (void)
{
  for (cp_binding_level *b = current_binding_level; b ; b = b->level_chain)
    if (b->kind == sk_omp)
      {
	error ("invalid exit from OpenMP structured block");
	return false;
      }
    else if (b->kind == sk_function_parms)
      break;
  return true;
}

/* Define a label, specifying the location in the source file.
   Return the LABEL_DECL node for the label.  */

static tree
define_label_1 (location_t location, tree name)
{
  /* After labels, make any new cleanups in the function go into their
     own new (temporary) binding contour.
*/ for (cp_binding_level *p = current_binding_level; p->kind != sk_function_parms; p = p->level_chain) p->more_cleanups_ok = 0; named_label_entry *ent = lookup_label_1 (name, false); tree decl = ent->label_decl; if (DECL_INITIAL (decl) != NULL_TREE) { error ("duplicate label %qD", decl); return error_mark_node; } else { /* Mark label as having been defined. */ DECL_INITIAL (decl) = error_mark_node; /* Say where in the source. */ DECL_SOURCE_LOCATION (decl) = location; ent->binding_level = current_binding_level; ent->names_in_scope = current_binding_level->names; for (named_label_use_entry *use = ent->uses; use; use = use->next) check_previous_goto (decl, use); ent->uses = NULL; } return decl; } /* Wrapper for define_label_1. */ tree define_label (location_t location, tree name) { bool running = timevar_cond_start (TV_NAME_LOOKUP); tree ret = define_label_1 (location, name); timevar_cond_stop (TV_NAME_LOOKUP, running); return ret; } struct cp_switch { cp_binding_level *level; struct cp_switch *next; /* The SWITCH_STMT being built. */ tree switch_stmt; /* A splay-tree mapping the low element of a case range to the high element, or NULL_TREE if there is no high element. Used to determine whether or not a new case label duplicates an old case label. We need a tree, rather than simply a hash table, because of the GNU case range extension. */ splay_tree cases; /* Remember whether a default: case label has been seen. */ bool has_default_p; /* Remember whether a BREAK_STMT has been seen in this SWITCH_STMT. */ bool break_stmt_seen_p; /* Set if inside of {FOR,DO,WHILE}_BODY nested inside of a switch, where BREAK_STMT doesn't belong to the SWITCH_STMT. */ bool in_loop_body_p; }; /* A stack of the currently active switch statements. The innermost switch statement is on the top of the stack. There is no need to mark the stack for garbage collection because it is only active during the processing of the body of a function, and we never collect at that point. 
   */

static struct cp_switch *switch_stack;

/* Called right after a switch-statement condition is parsed.  SWITCH_STMT is
   the switch statement being parsed.  */

void
push_switch (tree switch_stmt)
{
  struct cp_switch *p = XNEW (struct cp_switch);
  p->level = current_binding_level;
  p->next = switch_stack;
  p->switch_stmt = switch_stmt;
  p->cases = splay_tree_new (case_compare, NULL, NULL);
  p->has_default_p = false;
  p->break_stmt_seen_p = false;
  p->in_loop_body_p = false;
  switch_stack = p;
}

/* Pop the innermost switch off switch_stack, emitting switch-related
   warnings and recording fallthrough facts on the SWITCH_STMT.  */

void
pop_switch (void)
{
  struct cp_switch *cs = switch_stack;
  location_t switch_location;

  /* Emit warnings as needed.  */
  switch_location = cp_expr_loc_or_input_loc (cs->switch_stmt);
  const bool bool_cond_p
    = (SWITCH_STMT_TYPE (cs->switch_stmt)
       && TREE_CODE (SWITCH_STMT_TYPE (cs->switch_stmt)) == BOOLEAN_TYPE);
  if (!processing_template_decl)
    c_do_switch_warnings (cs->cases, switch_location,
			  SWITCH_STMT_TYPE (cs->switch_stmt),
			  SWITCH_STMT_COND (cs->switch_stmt), bool_cond_p);

  /* For the benefit of block_may_fallthru remember if the switch body case
     labels cover all possible values and if there are break; stmts.  */
  if (cs->has_default_p
      || (!processing_template_decl
	  && c_switch_covers_all_cases_p (cs->cases,
					  SWITCH_STMT_TYPE (cs->switch_stmt))))
    SWITCH_STMT_ALL_CASES_P (cs->switch_stmt) = 1;
  if (!cs->break_stmt_seen_p)
    SWITCH_STMT_NO_BREAK_P (cs->switch_stmt) = 1;
  gcc_assert (!cs->in_loop_body_p);
  splay_tree_delete (cs->cases);
  switch_stack = switch_stack->next;
  free (cs);
}

/* Note that a BREAK_STMT is about to be added.  If it is inside of a
   SWITCH_STMT and not inside of a loop body inside of it, note in
   switch_stack we've seen a BREAK_STMT.  */

void
note_break_stmt (void)
{
  if (switch_stack && !switch_stack->in_loop_body_p)
    switch_stack->break_stmt_seen_p = true;
}

/* Note the start of processing of an iteration statement's body.  The
   note_break_stmt function will do nothing while processing it.  Return a
   flag that should be passed to note_iteration_stmt_body_end.
   */

bool
note_iteration_stmt_body_start (void)
{
  if (!switch_stack)
    return false;
  bool ret = switch_stack->in_loop_body_p;
  switch_stack->in_loop_body_p = true;
  return ret;
}

/* Note the end of processing of an iteration statement's body.  */

void
note_iteration_stmt_body_end (bool prev)
{
  if (switch_stack)
    switch_stack->in_loop_body_p = prev;
}

/* Convert a case constant VALUE in a switch to the type TYPE of the switch
   condition.  Note that if TYPE and VALUE are already integral we don't
   really do the conversion because the language-independent
   warning/optimization code will work better that way.  */

static tree
case_conversion (tree type, tree value)
{
  if (value == NULL_TREE)
    return value;

  value = mark_rvalue_use (value);

  if (INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P (type))
    type = type_promotes_to (type);

  tree ovalue = value;
  /* The constant-expression VALUE shall be a converted constant expression of
     the adjusted type of the switch condition, which doesn't allow narrowing
     conversions.  */
  value = build_converted_constant_expr (type, value, tf_warning_or_error);

  if (cxx_dialect >= cxx11
      && (SCOPED_ENUM_P (type)
	  || !INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P (TREE_TYPE (ovalue))))
    /* Use the converted value.  */;
  else
    /* The already integral case.  */
    value = ovalue;

  return cxx_constant_value (value);
}

/* Note that we've seen a definition of a case label, and complain if this is
   a bad place for one.  */

tree
finish_case_label (location_t loc, tree low_value, tree high_value)
{
  tree cond, r;
  cp_binding_level *p;
  tree type;

  /* A null low and high value means this is the default: label.  */
  if (low_value == NULL_TREE && high_value == NULL_TREE)
    switch_stack->has_default_p = true;

  if (processing_template_decl)
    {
      tree label;

      /* For templates, just add the case label; we'll do semantic analysis
	 at instantiation-time.  */
      label = build_decl (loc, LABEL_DECL, NULL_TREE, void_type_node);
      return add_stmt (build_case_label (low_value, high_value, label));
    }

  /* Find the condition on which this switch statement depends.
     */
  cond = SWITCH_STMT_COND (switch_stack->switch_stmt);
  if (cond && TREE_CODE (cond) == TREE_LIST)
    cond = TREE_VALUE (cond);

  /* The implicit jump from the switch head to this label must not skip
     initializations or enter forbidden scopes.  */
  if (!check_switch_goto (switch_stack->level))
    return error_mark_node;

  type = SWITCH_STMT_TYPE (switch_stack->switch_stmt);
  if (type == error_mark_node)
    return error_mark_node;

  low_value = case_conversion (type, low_value);
  high_value = case_conversion (type, high_value);

  r = c_add_case_label (loc, switch_stack->cases, cond, low_value, high_value);

  /* After labels, make any new cleanups in the function go into their own new
     (temporary) binding contour.  */
  for (p = current_binding_level;
       p->kind != sk_function_parms;
       p = p->level_chain)
    p->more_cleanups_ok = 0;

  return r;
}

/* Key describing a TYPENAME_TYPE for the typename_htab cache.  */

struct typename_info
{
  tree scope;
  tree name;
  tree template_id;
  bool enum_p;
  bool class_p;
};

struct typename_hasher : ggc_ptr_hash<tree_node>
{
  typedef typename_info *compare_type;

  /* Hash a TYPENAME_TYPE.  */

  static hashval_t
  hash (tree t)
  {
    hashval_t hash;

    hash = (htab_hash_pointer (TYPE_CONTEXT (t))
	    ^ htab_hash_pointer (TYPE_IDENTIFIER (t)));

    return hash;
  }

  /* Compare two TYPENAME_TYPEs.  */

  static bool
  equal (tree t1, const typename_info *t2)
  {
    return (TYPE_IDENTIFIER (t1) == t2->name
	    && TYPE_CONTEXT (t1) == t2->scope
	    && TYPENAME_TYPE_FULLNAME (t1) == t2->template_id
	    && TYPENAME_IS_ENUM_P (t1) == t2->enum_p
	    && TYPENAME_IS_CLASS_P (t1) == t2->class_p);
  }
};

/* Build a TYPENAME_TYPE.  If the type is `typename T::t', CONTEXT is the type
   of `T', NAME is the IDENTIFIER_NODE for `t'.

   Returns the new TYPENAME_TYPE.
*/ static GTY (()) hash_table<typename_hasher> *typename_htab; tree build_typename_type (tree context, tree name, tree fullname, enum tag_types tag_type) { typename_info ti; if (typename_htab == NULL) typename_htab = hash_table<typename_hasher>::create_ggc (61); ti.scope = FROB_CONTEXT (context); ti.name = name; ti.template_id = fullname; ti.enum_p = tag_type == enum_type; ti.class_p = (tag_type == class_type || tag_type == record_type || tag_type == union_type); hashval_t hash = (htab_hash_pointer (ti.scope) ^ htab_hash_pointer (ti.name)); /* See if we already have this type. */ tree *e = typename_htab->find_slot_with_hash (&ti, hash, INSERT); tree t = *e; if (*e) t = *e; else { /* Build the TYPENAME_TYPE. */ t = cxx_make_type (TYPENAME_TYPE); TYPE_CONTEXT (t) = ti.scope; TYPENAME_TYPE_FULLNAME (t) = ti.template_id; TYPENAME_IS_ENUM_P (t) = ti.enum_p; TYPENAME_IS_CLASS_P (t) = ti.class_p; /* Build the corresponding TYPE_DECL. */ tree d = build_decl (input_location, TYPE_DECL, name, t); TYPE_NAME (t) = d; TYPE_STUB_DECL (t) = d; DECL_CONTEXT (d) = ti.scope; DECL_ARTIFICIAL (d) = 1; /* Store it in the hash table. */ *e = t; /* TYPENAME_TYPEs must always be compared structurally, because they may or may not resolve down to another type depending on the currently open classes. */ SET_TYPE_STRUCTURAL_EQUALITY (t); } return t; } /* Resolve `typename CONTEXT::NAME'. TAG_TYPE indicates the tag provided to name the type. Returns an appropriate type, unless an error occurs, in which case error_mark_node is returned. If we locate a non-artificial TYPE_DECL and TF_KEEP_TYPE_DECL is set, we return that, rather than the _TYPE it corresponds to, in other cases we look through the type decl. If TF_ERROR is set, complain about errors, otherwise be quiet. 
   */

tree
make_typename_type (tree context, tree name, enum tag_types tag_type,
		    tsubst_flags_t complain)
{
  tree fullname;
  tree t;
  bool want_template;

  if (name == error_mark_node
      || context == NULL_TREE
      || context == error_mark_node)
    return error_mark_node;

  /* Normalize NAME to an IDENTIFIER_NODE or TEMPLATE_ID_EXPR.  */
  if (TYPE_P (name))
    {
      if (!(TYPE_LANG_SPECIFIC (name)
	    && (CLASSTYPE_IS_TEMPLATE (name)
		|| CLASSTYPE_USE_TEMPLATE (name))))
	name = TYPE_IDENTIFIER (name);
      else
	/* Create a TEMPLATE_ID_EXPR for the type.  */
	name = build_nt (TEMPLATE_ID_EXPR,
			 CLASSTYPE_TI_TEMPLATE (name),
			 CLASSTYPE_TI_ARGS (name));
    }
  else if (TREE_CODE (name) == TYPE_DECL)
    name = DECL_NAME (name);

  fullname = name;

  if (TREE_CODE (name) == TEMPLATE_ID_EXPR)
    {
      name = TREE_OPERAND (name, 0);
      if (DECL_TYPE_TEMPLATE_P (name))
	name = TREE_OPERAND (fullname, 0) = DECL_NAME (name);
      if (TREE_CODE (name) != IDENTIFIER_NODE)
	{
	  if (complain & tf_error)
	    error ("%qD is not a type", name);
	  return error_mark_node;
	}
    }
  if (TREE_CODE (name) == TEMPLATE_DECL)
    {
      if (complain & tf_error)
	error ("%qD used without template arguments", name);
      return error_mark_node;
    }
  gcc_assert (identifier_p (name));
  gcc_assert (TYPE_P (context));

  if (TREE_CODE (context) == TYPE_PACK_EXPANSION)
    /* This can happen for C++17 variadic using (c++/88986).  */;
  else if (!MAYBE_CLASS_TYPE_P (context))
    {
      if (complain & tf_error)
	error ("%q#T is not a class", context);
      return error_mark_node;
    }

  /* When the CONTEXT is a dependent type, NAME could refer to a dependent
     base class of CONTEXT.  But look inside it anyway if CONTEXT is a
     currently open scope, in case it refers to a member of the current
     instantiation or a non-dependent base; lookup will stop when we hit a
     dependent base.  */
  if (!dependent_scope_p (context))
    /* We should only set WANT_TYPE when we're a nested typename type.  Then
       we can give better diagnostics if we find a non-type.
       */
    t = lookup_field (context, name, 2, /*want_type=*/true);
  else
    t = NULL_TREE;

  /* Lookup failed or was ambiguous in a dependent context: represent the
     name as a TYPENAME_TYPE to be resolved at instantiation time.  */
  if ((!t || TREE_CODE (t) == TREE_LIST) && dependent_type_p (context))
    return build_typename_type (context, name, fullname, tag_type);

  want_template = TREE_CODE (fullname) == TEMPLATE_ID_EXPR;

  if (!t)
    {
      if (complain & tf_error)
	{
	  if (!COMPLETE_TYPE_P (context))
	    cxx_incomplete_type_error (NULL_TREE, context);
	  else
	    error (want_template ? G_("no class template named %q#T in %q#T")
		   : G_("no type named %q#T in %q#T"), name, context);
	}
      return error_mark_node;
    }

  /* Pull out the template from an injected-class-name (or multiple).  */
  if (want_template)
    t = maybe_get_template_decl_from_type_decl (t);

  if (TREE_CODE (t) == TREE_LIST)
    {
      /* A TREE_LIST here means the lookup was ambiguous.  */
      if (complain & tf_error)
	{
	  error ("lookup of %qT in %qT is ambiguous", name, context);
	  print_candidates (t);
	}
      return error_mark_node;
    }

  if (want_template && !DECL_TYPE_TEMPLATE_P (t))
    {
      if (complain & tf_error)
	error ("%<typename %T::%D%> names %q#T, which is not a class template",
	       context, name, t);
      return error_mark_node;
    }
  if (!want_template && TREE_CODE (t) != TYPE_DECL)
    {
      if (complain & tf_error)
	error ("%<typename %T::%D%> names %q#T, which is not a type",
	       context, name, t);
      return error_mark_node;
    }

  if (!check_accessibility_of_qualified_id (t, /*object_type=*/NULL_TREE,
					    context, complain))
    return error_mark_node;

  if (want_template)
    return lookup_template_class (t, TREE_OPERAND (fullname, 1),
				  NULL_TREE, context,
				  /*entering_scope=*/0,
				  complain | tf_user);

  /* Look through the TYPE_DECL unless the caller asked to keep it.  */
  if (DECL_ARTIFICIAL (t) || !(complain & tf_keep_type_decl))
    t = TREE_TYPE (t);

  maybe_record_typedef_use (t);

  return t;
}

/* Resolve `CONTEXT::template NAME'.  Returns a TEMPLATE_DECL if the name can
   be resolved or an UNBOUND_CLASS_TEMPLATE, unless an error occurs, in which
   case error_mark_node is returned.

   If PARM_LIST is non-NULL, also make sure that the template parameter list
   of TEMPLATE_DECL matches.

   If COMPLAIN zero, don't complain about any errors that occur.
   */

tree
make_unbound_class_template (tree context, tree name, tree parm_list,
			     tsubst_flags_t complain)
{
  /* Normalize NAME to an identifier.  */
  if (TYPE_P (name))
    name = TYPE_IDENTIFIER (name);
  else if (DECL_P (name))
    name = DECL_NAME (name);
  gcc_assert (identifier_p (name));

  if (!dependent_type_p (context)
      || currently_open_class (context))
    {
      /* The scope is known: resolve the member template now.  */
      tree tmpl = NULL_TREE;

      if (MAYBE_CLASS_TYPE_P (context))
	tmpl = lookup_field (context, name, 0, false);

      if (tmpl && TREE_CODE (tmpl) == TYPE_DECL)
	tmpl = maybe_get_template_decl_from_type_decl (tmpl);

      if (!tmpl || !DECL_TYPE_TEMPLATE_P (tmpl))
	{
	  if (complain & tf_error)
	    error ("no class template named %q#T in %q#T", name, context);
	  return error_mark_node;
	}

      if (parm_list
	  && !comp_template_parms (DECL_TEMPLATE_PARMS (tmpl), parm_list))
	{
	  if (complain & tf_error)
	    {
	      error ("template parameters do not match template %qD", tmpl);
	      inform (DECL_SOURCE_LOCATION (tmpl), "%qD declared here", tmpl);
	    }
	  return error_mark_node;
	}

      if (!perform_or_defer_access_check (TYPE_BINFO (context), tmpl, tmpl,
					  complain))
	return error_mark_node;

      return tmpl;
    }

  /* Build the UNBOUND_CLASS_TEMPLATE.  */
  tree t = cxx_make_type (UNBOUND_CLASS_TEMPLATE);
  TYPE_CONTEXT (t) = FROB_CONTEXT (context);
  TREE_TYPE (t) = NULL_TREE;
  SET_TYPE_STRUCTURAL_EQUALITY (t);

  /* Build the corresponding TEMPLATE_DECL.  */
  tree d = build_decl (input_location, TEMPLATE_DECL, name, t);
  TYPE_NAME (t) = d;
  TYPE_STUB_DECL (t) = d;
  DECL_CONTEXT (d) = TYPE_CONTEXT (t);
  DECL_ARTIFICIAL (d) = 1;
  DECL_TEMPLATE_PARMS (d) = parm_list;

  return t;
}

/* Push the declarations of builtin types into the global namespace.
   RID_INDEX is the index of the builtin type in the array RID_POINTERS.
   NAME is the name used when looking up the builtin type.  TYPE is the
   _TYPE node for the builtin type.

   The calls to set_global_binding below should be eliminated.  Built-in
   types should not be looked up name; their names are keywords that the
   parser can recognize.
   However, there is code in c-common.c that uses identifier_global_value to
   look up built-in types by name.  */

void
record_builtin_type (enum rid rid_index,
		     const char* name, tree type)
{
  tree decl = NULL_TREE;

  /* Bind the type under NAME, if one was given.  */
  if (name)
    {
      tree tname = get_identifier (name);
      tree tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL, tname, type);
      DECL_ARTIFICIAL (tdecl) = 1;
      set_global_binding (tdecl);
      decl = tdecl;
    }

  /* Also bind it under the reserved-word identifier, unless that would
     duplicate the binding made above.  */
  if ((int) rid_index < (int) RID_MAX)
    if (tree rname = ridpointers[(int) rid_index])
      if (!decl || DECL_NAME (decl) != rname)
	{
	  tree rdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL, rname, type);
	  DECL_ARTIFICIAL (rdecl) = 1;
	  set_global_binding (rdecl);
	  if (!decl)
	    decl = rdecl;
	}

  if (decl)
    {
      if (!TYPE_NAME (type))
	TYPE_NAME (type) = decl;
      debug_hooks->type_decl (decl, 0);
    }
}

/* Push a type into the namespace so that the back ends ignore it.  */

static void
record_unknown_type (tree type, const char* name)
{
  tree decl = pushdecl (build_decl (UNKNOWN_LOCATION, TYPE_DECL,
				    get_identifier (name), type));
  /* Make sure the "unknown type" typedecl gets ignored for debug info.  */
  DECL_IGNORED_P (decl) = 1;
  TYPE_DECL_SUPPRESS_DEBUG (decl) = 1;
  TYPE_SIZE (type) = TYPE_SIZE (void_type_node);
  SET_TYPE_ALIGN (type, 1);
  TYPE_USER_ALIGN (type) = 0;
  SET_TYPE_MODE (type, TYPE_MODE (void_type_node));
}

/* Create all the predefined identifiers.  */

static void
initialize_predefined_identifiers (void)
{
  struct predefined_identifier
  {
    const char *name; /* Name.  */
    tree *node; /* Node to store it in.  */
    cp_identifier_kind kind; /* Kind of identifier.  */
  };

  /* A table of identifiers to create at startup.  */
  static const predefined_identifier predefined_identifiers[] = {
    {"C++", &lang_name_cplusplus, cik_normal},
    {"C", &lang_name_c, cik_normal},
    /* Some of these names have a trailing space so that it is impossible for
       them to conflict with names written by users.
       */
    {"__ct ", &ctor_identifier, cik_ctor},
    {"__ct_base ", &base_ctor_identifier, cik_ctor},
    {"__ct_comp ", &complete_ctor_identifier, cik_ctor},
    {"__dt ", &dtor_identifier, cik_dtor},
    {"__dt_base ", &base_dtor_identifier, cik_dtor},
    {"__dt_comp ", &complete_dtor_identifier, cik_dtor},
    {"__dt_del ", &deleting_dtor_identifier, cik_dtor},
    {"__conv_op ", &conv_op_identifier, cik_conv_op},
    {"__in_chrg", &in_charge_identifier, cik_normal},
    {"__as_base ", &as_base_identifier, cik_normal},
    {"this", &this_identifier, cik_normal},
    {"__delta", &delta_identifier, cik_normal},
    {"__pfn", &pfn_identifier, cik_normal},
    {"_vptr", &vptr_identifier, cik_normal},
    {"__vtt_parm", &vtt_parm_identifier, cik_normal},
    {"::", &global_identifier, cik_normal},
    /* The demangler expects anonymous namespaces to be called something
       starting with '_GLOBAL__N_'.  It no longer needs to be unique to the
       TU.  */
    {"_GLOBAL__N_1", &anon_identifier, cik_normal},
    {"auto", &auto_identifier, cik_normal},
    {"decltype(auto)", &decltype_auto_identifier, cik_normal},
    {"initializer_list", &init_list_identifier, cik_normal},
    {"__for_range ", &for_range__identifier, cik_normal},
    {"__for_begin ", &for_begin__identifier, cik_normal},
    {"__for_end ", &for_end__identifier, cik_normal},
    {"__for_range", &for_range_identifier, cik_normal},
    {"__for_begin", &for_begin_identifier, cik_normal},
    {"__for_end", &for_end_identifier, cik_normal},
    {"abi_tag", &abi_tag_identifier, cik_normal},
    {"aligned", &aligned_identifier, cik_normal},
    {"begin", &begin_identifier, cik_normal},
    {"end", &end_identifier, cik_normal},
    {"get", &get__identifier, cik_normal},
    {"gnu", &gnu_identifier, cik_normal},
    {"tuple_element", &tuple_element_identifier, cik_normal},
    {"tuple_size", &tuple_size_identifier, cik_normal},
    {"type", &type_identifier, cik_normal},
    {"value", &value_identifier, cik_normal},
    {"_FUN", &fun_identifier, cik_normal},
    {"__closure", &closure_identifier, cik_normal},
    {"heap uninit", &heap_uninit_identifier, cik_normal},
    {"heap ", &heap_identifier, cik_normal},
    {"heap deleted", &heap_deleted_identifier, cik_normal},
    {NULL, NULL, cik_normal}
  };

  for (const predefined_identifier *pid = predefined_identifiers;
       pid->name; ++pid)
    {
      *pid->node = get_identifier (pid->name);
      /* Some of these identifiers already have a special kind.  */
      if (pid->kind != cik_normal)
	set_identifier_kind (*pid->node, pid->kind);
    }
}

/* Create the predefined scalar types of C, and some nodes representing
   standard constants (0, 1, (void *)0).  Initialize the global binding
   level.  Make definitions for built-in primitive functions.  */

void
cxx_init_decl_processing (void)
{
  tree void_ftype;
  tree void_ftype_ptr;

  /* Create all the identifiers we need.  */
  initialize_predefined_identifiers ();

  /* Create the global variables.  */
  push_to_top_level ();

  current_function_decl = NULL_TREE;
  current_binding_level = NULL;
  /* Enter the global namespace.  */
  gcc_assert (global_namespace == NULL_TREE);
  global_namespace = build_lang_decl (NAMESPACE_DECL, global_identifier,
				      void_type_node);
  TREE_PUBLIC (global_namespace) = 1;
  DECL_CONTEXT (global_namespace)
    = build_translation_unit_decl (get_identifier (main_input_filename));
  /* Remember whether we want the empty class passing ABI change warning in
     this TU.  */
  TRANSLATION_UNIT_WARN_EMPTY_P (DECL_CONTEXT (global_namespace))
    = warn_abi && abi_version_crosses (12);
  debug_hooks->register_main_translation_unit
    (DECL_CONTEXT (global_namespace));
  begin_scope (sk_namespace, global_namespace);
  current_namespace = global_namespace;

  if (flag_visibility_ms_compat)
    default_visibility = VISIBILITY_HIDDEN;

  /* Initially, C.  */
  current_lang_name = lang_name_c;

  /* Create the `std' namespace.
     */
  push_namespace (get_identifier ("std"));
  std_node = current_namespace;
  pop_namespace ();

  flag_noexcept_type = (cxx_dialect >= cxx17);

  c_common_nodes_and_builtins ();

  /* Register the front-end-evaluated builtins.  */
  tree bool_ftype = build_function_type_list (boolean_type_node, NULL_TREE);
  tree decl
    = add_builtin_function ("__builtin_is_constant_evaluated",
			    bool_ftype, CP_BUILT_IN_IS_CONSTANT_EVALUATED,
			    BUILT_IN_FRONTEND, NULL, NULL_TREE);
  set_call_expr_flags (decl, ECF_CONST | ECF_NOTHROW | ECF_LEAF);

  tree cptr_ftype = build_function_type_list (const_ptr_type_node, NULL_TREE);
  decl = add_builtin_function ("__builtin_source_location",
			       cptr_ftype, CP_BUILT_IN_SOURCE_LOCATION,
			       BUILT_IN_FRONTEND, NULL, NULL_TREE);
  set_call_expr_flags (decl, ECF_CONST | ECF_NOTHROW | ECF_LEAF);

  integer_two_node = build_int_cst (NULL_TREE, 2);

  /* Guess at the initial static decls size.  */
  vec_alloc (static_decls, 500);

  /* ... and keyed classes.  */
  vec_alloc (keyed_classes, 100);

  record_builtin_type (RID_BOOL, "bool", boolean_type_node);
  truthvalue_type_node = boolean_type_node;
  truthvalue_false_node = boolean_false_node;
  truthvalue_true_node = boolean_true_node;

  empty_except_spec = build_tree_list (NULL_TREE, NULL_TREE);
  noexcept_true_spec = build_tree_list (boolean_true_node, NULL_TREE);
  noexcept_false_spec = build_tree_list (boolean_false_node, NULL_TREE);
  noexcept_deferred_spec = build_tree_list (make_node (DEFERRED_NOEXCEPT),
					    NULL_TREE);

#if 0
  record_builtin_type (RID_MAX, NULL, string_type_node);
#endif

  delta_type_node = ptrdiff_type_node;
  vtable_index_type = ptrdiff_type_node;

  vtt_parm_type = build_pointer_type (const_ptr_type_node);
  void_ftype = build_function_type_list (void_type_node, NULL_TREE);
  void_ftype_ptr = build_function_type_list (void_type_node,
					     ptr_type_node, NULL_TREE);
  void_ftype_ptr
    = build_exception_variant (void_ftype_ptr, empty_except_spec);

  /* Create the conversion operator marker.  This operator's DECL_NAME is in
     the identifier table, so we can use identifier equality to find it.
     */
  conv_op_marker = build_lang_decl (FUNCTION_DECL, conv_op_identifier,
				    void_ftype);

  /* C++ extensions */

  unknown_type_node = make_node (LANG_TYPE);
  record_unknown_type (unknown_type_node, "unknown type");

  /* Indirecting an UNKNOWN_TYPE node yields an UNKNOWN_TYPE node.  */
  TREE_TYPE (unknown_type_node) = unknown_type_node;

  /* Looking up TYPE_POINTER_TO and TYPE_REFERENCE_TO yield the same
     result.  */
  TYPE_POINTER_TO (unknown_type_node) = unknown_type_node;
  TYPE_REFERENCE_TO (unknown_type_node) = unknown_type_node;

  init_list_type_node = make_node (LANG_TYPE);
  record_unknown_type (init_list_type_node, "init list");

  {
    /* Make sure we get a unique function type, so we can give its pointer
       type a name.  (This wins for gdb.) */
    tree vfunc_type = make_node (FUNCTION_TYPE);
    TREE_TYPE (vfunc_type) = integer_type_node;
    TYPE_ARG_TYPES (vfunc_type) = NULL_TREE;
    layout_type (vfunc_type);

    vtable_entry_type = build_pointer_type (vfunc_type);
  }
  record_builtin_type (RID_MAX, "__vtbl_ptr_type", vtable_entry_type);

  vtbl_type_node
    = build_cplus_array_type (vtable_entry_type, NULL_TREE);
  layout_type (vtbl_type_node);
  vtbl_type_node = cp_build_qualified_type (vtbl_type_node, TYPE_QUAL_CONST);
  record_builtin_type (RID_MAX, NULL, vtbl_type_node);
  vtbl_ptr_type_node = build_pointer_type (vtable_entry_type);
  layout_type (vtbl_ptr_type_node);
  record_builtin_type (RID_MAX, NULL, vtbl_ptr_type_node);

  push_namespace (get_identifier ("__cxxabiv1"));
  abi_node = current_namespace;
  pop_namespace ();

  global_type_node = make_node (LANG_TYPE);
  record_unknown_type (global_type_node, "global type");

  any_targ_node = make_node (LANG_TYPE);
  record_unknown_type (any_targ_node, "any type");

  /* Now, C++.  */
  current_lang_name = lang_name_cplusplus;

  /* Validate and normalize -faligned-new= before using it below.  */
  if (aligned_new_threshold > 1
      && !pow2p_hwi (aligned_new_threshold))
    {
      error ("%<-faligned-new=%d%> is not a power of two",
	     aligned_new_threshold);
      aligned_new_threshold = 1;
    }
  if (aligned_new_threshold == -1)
    aligned_new_threshold = (cxx_dialect >= cxx17) ? 1 : 0;
  if (aligned_new_threshold == 1)
    aligned_new_threshold = malloc_alignment () / BITS_PER_UNIT;

  {
    tree newattrs, extvisattr;
    tree newtype, deltype;
    tree ptr_ftype_sizetype;
    tree new_eh_spec;

    ptr_ftype_sizetype
      = build_function_type_list (ptr_type_node, size_type_node, NULL_TREE);
    if (cxx_dialect == cxx98)
      {
	tree bad_alloc_id;
	tree bad_alloc_type_node;
	tree bad_alloc_decl;

	push_nested_namespace (std_node);
	bad_alloc_id = get_identifier ("bad_alloc");
	bad_alloc_type_node = make_class_type (RECORD_TYPE);
	TYPE_CONTEXT (bad_alloc_type_node) = current_namespace;
	bad_alloc_decl
	  = create_implicit_typedef (bad_alloc_id, bad_alloc_type_node);
	DECL_CONTEXT (bad_alloc_decl) = current_namespace;
	pop_nested_namespace (std_node);

	new_eh_spec
	  = add_exception_specifier (NULL_TREE, bad_alloc_type_node, -1);
      }
    else
      new_eh_spec = noexcept_false_spec;

    /* Ensure attribs.c is initialized.  */
    init_attributes ();

    extvisattr = build_tree_list (get_identifier ("externally_visible"),
				  NULL_TREE);
    newattrs = tree_cons (get_identifier ("alloc_size"),
			  build_tree_list (NULL_TREE, integer_one_node),
			  extvisattr);
    newtype = cp_build_type_attribute_variant (ptr_ftype_sizetype, newattrs);
    newtype = build_exception_variant (newtype, new_eh_spec);
    deltype = cp_build_type_attribute_variant (void_ftype_ptr, extvisattr);
    deltype = build_exception_variant (deltype, empty_except_spec);

    /* Declare the replaceable global operator new/delete (scalar and
       array forms).  */
    tree opnew = push_cp_library_fn (NEW_EXPR, newtype, 0);
    DECL_IS_MALLOC (opnew) = 1;
    DECL_SET_IS_OPERATOR_NEW (opnew, true);
    DECL_IS_REPLACEABLE_OPERATOR (opnew) = 1;
    opnew = push_cp_library_fn (VEC_NEW_EXPR, newtype, 0);
    DECL_IS_MALLOC (opnew) = 1;
    DECL_SET_IS_OPERATOR_NEW (opnew, true);
    DECL_IS_REPLACEABLE_OPERATOR (opnew) = 1;
    tree opdel = push_cp_library_fn (DELETE_EXPR, deltype, ECF_NOTHROW);
    DECL_SET_IS_OPERATOR_DELETE (opdel, true);
    DECL_IS_REPLACEABLE_OPERATOR (opdel) = 1;
    opdel = push_cp_library_fn (VEC_DELETE_EXPR, deltype, ECF_NOTHROW);
    DECL_SET_IS_OPERATOR_DELETE (opdel, true);
    DECL_IS_REPLACEABLE_OPERATOR (opdel) = 1;
    if (flag_sized_deallocation)
      {
	/* Also push the sized deallocation variants:
	   void operator delete(void*, std::size_t) throw();
	   void operator delete[](void*, std::size_t) throw();  */
	tree void_ftype_ptr_size
	  = build_function_type_list (void_type_node, ptr_type_node,
				      size_type_node, NULL_TREE);
	deltype = cp_build_type_attribute_variant (void_ftype_ptr_size,
						   extvisattr);
	deltype = build_exception_variant (deltype, empty_except_spec);
	opdel = push_cp_library_fn (DELETE_EXPR, deltype, ECF_NOTHROW);
	DECL_SET_IS_OPERATOR_DELETE (opdel, true);
	DECL_IS_REPLACEABLE_OPERATOR (opdel) = 1;
	opdel = push_cp_library_fn (VEC_DELETE_EXPR, deltype, ECF_NOTHROW);
	DECL_SET_IS_OPERATOR_DELETE (opdel, true);
	DECL_IS_REPLACEABLE_OPERATOR (opdel) = 1;
      }

    if (aligned_new_threshold)
      {
	/* Aligned-new support: declare std::align_val_t and the aligned
	   allocation/deallocation functions.  */
	push_nested_namespace (std_node);
	tree align_id = get_identifier ("align_val_t");
	align_type_node = start_enum (align_id, NULL_TREE, size_type_node,
				      NULL_TREE, /*scoped*/true, NULL);
	pop_nested_namespace (std_node);

	/* operator new (size_t, align_val_t); */
	newtype = build_function_type_list (ptr_type_node, size_type_node,
					    align_type_node, NULL_TREE);
	newtype = cp_build_type_attribute_variant (newtype, newattrs);
	newtype = build_exception_variant (newtype, new_eh_spec);
	opnew = push_cp_library_fn (NEW_EXPR, newtype, 0);
	DECL_IS_MALLOC (opnew) = 1;
	DECL_SET_IS_OPERATOR_NEW (opnew, true);
	DECL_IS_REPLACEABLE_OPERATOR (opnew) = 1;
	opnew = push_cp_library_fn (VEC_NEW_EXPR, newtype, 0);
	DECL_IS_MALLOC (opnew) = 1;
	DECL_SET_IS_OPERATOR_NEW (opnew, true);
	DECL_IS_REPLACEABLE_OPERATOR (opnew) = 1;

	/* operator delete (void *, align_val_t); */
	deltype = build_function_type_list (void_type_node, ptr_type_node,
					    align_type_node, NULL_TREE);
	deltype = cp_build_type_attribute_variant (deltype, extvisattr);
	deltype = build_exception_variant (deltype, empty_except_spec);
	opdel = push_cp_library_fn (DELETE_EXPR, deltype, ECF_NOTHROW);
	DECL_SET_IS_OPERATOR_DELETE (opdel, true);
	DECL_IS_REPLACEABLE_OPERATOR (opdel) = 1;
	opdel = push_cp_library_fn (VEC_DELETE_EXPR, deltype, ECF_NOTHROW);
	DECL_SET_IS_OPERATOR_DELETE (opdel, true);
	DECL_IS_REPLACEABLE_OPERATOR (opdel) = 1;

	if (flag_sized_deallocation)
	  {
	    /* operator delete (void *, size_t, align_val_t); */
	    deltype = build_function_type_list (void_type_node, ptr_type_node,
						size_type_node, align_type_node,
						NULL_TREE);
	    deltype = cp_build_type_attribute_variant (deltype, extvisattr);
	    deltype = build_exception_variant (deltype, empty_except_spec);
	    opdel = push_cp_library_fn (DELETE_EXPR, deltype, ECF_NOTHROW);
	    DECL_SET_IS_OPERATOR_DELETE (opdel, true);
	    DECL_IS_REPLACEABLE_OPERATOR (opdel) = 1;
	    opdel = push_cp_library_fn (VEC_DELETE_EXPR, deltype, ECF_NOTHROW);
	    DECL_SET_IS_OPERATOR_DELETE (opdel, true);
	    DECL_IS_REPLACEABLE_OPERATOR (opdel) = 1;
	  }
      }

    /* Build decltype(nullptr), laid out like a pointer.  */
    nullptr_type_node = make_node (NULLPTR_TYPE);
    TYPE_SIZE (nullptr_type_node) = bitsize_int (GET_MODE_BITSIZE (ptr_mode));
    TYPE_SIZE_UNIT (nullptr_type_node) = size_int (GET_MODE_SIZE (ptr_mode));
    TYPE_UNSIGNED (nullptr_type_node) = 1;
    TYPE_PRECISION (nullptr_type_node) = GET_MODE_BITSIZE (ptr_mode);
    if (abi_version_at_least (9))
      SET_TYPE_ALIGN (nullptr_type_node, GET_MODE_ALIGNMENT (ptr_mode));
    SET_TYPE_MODE (nullptr_type_node, ptr_mode);
    record_builtin_type (RID_MAX, "decltype(nullptr)", nullptr_type_node);
    nullptr_node = build_int_cst (nullptr_type_node, 0);
  }

  abort_fndecl
    = build_library_fn_ptr ("__cxa_pure_virtual", void_ftype,
			    ECF_NORETURN | ECF_NOTHROW | ECF_COLD);
  if (flag_weak)
    /* If no definition is available, resolve references to NULL.  */
    declare_weak (abort_fndecl);

  /* Perform other language dependent initializations.  */
  init_class_processing ();
  init_rtti_processing ();
  init_template_processing ();

  if (flag_exceptions)
    init_exception_processing ();

  if (! supports_one_only ())
    flag_weak = 0;

  make_fname_decl = cp_make_fname_decl;
  start_fname_decls ();

  /* Show we use EH for cleanups.  */
  if (flag_exceptions)
    using_eh_for_cleanups ();
}

/* Create the VAR_DECL for __FUNCTION__ etc.
ID is the name to give the decl, LOC is the location to give the decl,
   NAME is the initialization string and TYPE_DEP indicates whether
   NAME depended on the type of the function.  We make use of that to
   detect __PRETTY_FUNCTION__ inside a template fn.  This is being done
   lazily at the point of first use, so we mustn't push the decl now.  */

static tree
cp_make_fname_decl (location_t loc, tree id, int type_dep)
{
  tree domain = NULL_TREE;
  tree init = NULL_TREE;

  /* Skip computing the string inside a template when the name depends on
     the type of the enclosing function (__PRETTY_FUNCTION__): it can only
     be produced after instantiation.  DOMAIN and INIT then stay null and
     the decl gets an incomplete array type below.  */
  if (!(type_dep && in_template_function ()))
    {
      const char *name = NULL;
      bool release_name = false;
      if (current_function_decl == NULL_TREE)
	/* Used outside any function body.  */
	name = "top level";
      else if (type_dep == 0)
	{
	  /* __FUNCTION__ */
	  name = fname_as_string (type_dep);
	  /* fname_as_string allocates; remember to free it below.  */
	  release_name = true;
	}
      else
	{
	  /* __PRETTY_FUNCTION__ */
	  gcc_checking_assert (type_dep == 1);
	  name = cxx_printable_name (current_function_decl, 2);
	}
      size_t length = strlen (name);
      domain = build_index_type (size_int (length));
      init = build_string (length + 1, name);
      if (release_name)
	free (const_cast<char *> (name));
    }

  /* The decl's type is 'const char [length + 1]' (or an incomplete
     array type when the string is not known yet).  */
  tree type = cp_build_qualified_type (char_type_node, TYPE_QUAL_CONST);
  type = build_cplus_array_type (type, domain);

  if (init)
    TREE_TYPE (init) = type;
  else
    init = error_mark_node;

  tree decl = build_decl (loc, VAR_DECL, id, type);

  TREE_READONLY (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_DECLARED_CONSTEXPR_P (decl) = 1;
  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;

  /* The string is attached as the decl's DECL_VALUE_EXPR rather than
     as DECL_INITIAL.  */
  SET_DECL_VALUE_EXPR (decl, init);
  DECL_HAS_VALUE_EXPR_P (decl) = 1;
  /* For decl_constant_var_p.  */
  DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl) = 1;

  if (current_function_decl)
    {
      /* Inside a function: push into its outermost local scope and add a
	 DECL_EXPR for it.  */
      DECL_CONTEXT (decl) = current_function_decl;
      decl = pushdecl_outermost_localscope (decl);
      if (decl != error_mark_node)
	add_decl_expr (decl);
    }
  else
    {
      DECL_THIS_STATIC (decl) = true;
      decl = pushdecl_top_level_and_finish (decl, NULL_TREE);
    }

  return decl;
}

/* Install DECL as a builtin function at current global scope.  Return
   the new decl (if we found an existing version).  Also installs it
   into ::std, if it's not '_*'.
*/

tree
cxx_builtin_function (tree decl)
{
  retrofit_lang_decl (decl);

  DECL_ARTIFICIAL (decl) = 1;
  /* Builtins are registered with C linkage.  */
  SET_DECL_LANGUAGE (decl, lang_c);
  /* Runtime library routines are, by definition, available in an
     external shared object.  */
  DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
  DECL_VISIBILITY_SPECIFIED (decl) = 1;

  tree id = DECL_NAME (decl);
  const char *name = IDENTIFIER_POINTER (id);
  /* HIDING is passed to pushdecl; set for names that must be declared
     by the user before they become visible.  */
  bool hiding = false;
  if (name[0] != '_' || name[1] != '_')
    /* In the user's namespace, it must be declared before use.  */
    hiding = true;
  else if (IDENTIFIER_LENGTH (id) > strlen ("___chk")
	   && 0 != strncmp (name + 2, "builtin_", strlen ("builtin_"))
	   && 0 == memcmp (name + IDENTIFIER_LENGTH (id) - strlen ("_chk"),
			   "_chk", strlen ("_chk") + 1))
    /* Treat __*_chk fortification functions as anticipated as well,
       unless they are __builtin_*_chk.  */
    hiding = true;

  /* All builtins that don't begin with an '_' should additionally
     go in the 'std' namespace.  */
  if (name[0] != '_')
    {
      /* Push a copy into ::std, then fall through to also push the
	 original into the current namespace.  */
      tree std_decl = copy_decl (decl);

      push_nested_namespace (std_node);
      DECL_CONTEXT (std_decl) = FROB_CONTEXT (std_node);
      pushdecl (std_decl, hiding);
      pop_nested_namespace (std_node);
    }

  DECL_CONTEXT (decl) = FROB_CONTEXT (current_namespace);
  decl = pushdecl (decl, hiding);

  return decl;
}

/* Like cxx_builtin_function, but guarantee the function is added to the
   global scope.  This is to allow function specific options to add new
   machine dependent builtins when the target ISA changes via
   attribute((target(...))) which saves space on program startup if the
   program does not use non-generic ISAs.  */

tree
cxx_builtin_function_ext_scope (tree decl)
{
  push_nested_namespace (global_namespace);
  decl = cxx_builtin_function (decl);
  pop_nested_namespace (global_namespace);

  return decl;
}

/* Implement LANG_HOOKS_SIMULATE_BUILTIN_FUNCTION_DECL.
*/

tree
cxx_simulate_builtin_function_decl (tree decl)
{
  retrofit_lang_decl (decl);

  DECL_ARTIFICIAL (decl) = 1;
  /* Unlike cxx_builtin_function, simulated builtins get C++ linkage.  */
  SET_DECL_LANGUAGE (decl, lang_cplusplus);
  DECL_CONTEXT (decl) = FROB_CONTEXT (current_namespace);

  return pushdecl (decl);
}

/* Generate a FUNCTION_DECL with the typical flags for a runtime library
   function.  Not called directly.  */

static tree
build_library_fn (tree name, enum tree_code operator_code, tree type,
		  int ecf_flags)
{
  tree fn = build_lang_decl (FUNCTION_DECL, name, type);
  /* External, public, compiler-generated.  */
  DECL_EXTERNAL (fn) = 1;
  TREE_PUBLIC (fn) = 1;
  DECL_ARTIFICIAL (fn) = 1;
  /* Record which overloadable operator (if any) this decl implements;
     ERROR_MARK is used by callers for non-operator functions.  */
  DECL_OVERLOADED_OPERATOR_CODE_RAW (fn)
    = OVL_OP_INFO (false, operator_code)->ovl_op_code;
  SET_DECL_LANGUAGE (fn, lang_c);
  /* Runtime library routines are, by definition, available in an
     external shared object.  */
  DECL_VISIBILITY (fn) = VISIBILITY_DEFAULT;
  DECL_VISIBILITY_SPECIFIED (fn) = 1;
  set_call_expr_flags (fn, ecf_flags);
  return fn;
}

/* Returns the _DECL for a library function with C++ linkage.  */

static tree
build_cp_library_fn (tree name, enum tree_code operator_code, tree type,
		     int ecf_flags)
{
  tree fn = build_library_fn (name, operator_code, type, ecf_flags);
  DECL_CONTEXT (fn) = FROB_CONTEXT (current_namespace);
  SET_DECL_LANGUAGE (fn, lang_cplusplus);
  return fn;
}

/* Like build_library_fn, but takes a C string instead of an
   IDENTIFIER_NODE.  */

tree
build_library_fn_ptr (const char* name, tree type, int ecf_flags)
{
  return build_library_fn (get_identifier (name), ERROR_MARK, type,
			   ecf_flags);
}

/* Like build_cp_library_fn, but takes a C string instead of an
   IDENTIFIER_NODE.  */

tree
build_cp_library_fn_ptr (const char* name, tree type, int ecf_flags)
{
  return build_cp_library_fn (get_identifier (name), ERROR_MARK, type,
			      ecf_flags);
}

/* Like build_library_fn, but also pushes the function so that we will be
   able to find it via get_global_binding.  Also, the function may throw
   exceptions listed in RAISES.
*/

tree
push_library_fn (tree name, tree type, tree raises, int ecf_flags)
{
  /* Fold the exception specification RAISES, if any, into TYPE.  */
  if (raises)
    type = build_exception_variant (type, raises);

  tree fn = build_library_fn (name, ERROR_MARK, type, ecf_flags);
  return pushdecl_top_level (fn);
}

/* Like build_cp_library_fn, but also pushes the function so that it
   will be found by normal lookup.  */

static tree
push_cp_library_fn (enum tree_code operator_code, tree type,
		    int ecf_flags)
{
  tree fn = build_cp_library_fn (ovl_op_identifier (false, operator_code),
				 operator_code, type, ecf_flags);
  pushdecl (fn);
  /* Under -fgnu-tm the runtime operators are transaction-safe.  */
  if (flag_tm)
    apply_tm_attr (fn, get_identifier ("transaction_safe"));
  return fn;
}

/* Like push_library_fn, but takes a TREE_LIST of parm types rather than
   a FUNCTION_TYPE.  */

tree
push_void_library_fn (tree name, tree parmtypes, int ecf_flags)
{
  tree type = build_function_type (void_type_node, parmtypes);
  return push_library_fn (name, type, NULL_TREE, ecf_flags);
}

/* Like push_library_fn, but also note that this function throws
   and does not return.  Used for __throw_foo and the like.  */

tree
push_throw_library_fn (tree name, tree type)
{
  tree fn = push_library_fn (name, type, NULL_TREE,
			     ECF_NORETURN | ECF_COLD);
  return fn;
}

/* When we call finish_struct for an anonymous union, we create
   default copy constructors and such.  But, an anonymous union
   shouldn't have such things; this function undoes the damage to the
   anonymous union type T.

   (The reason that we create the synthesized methods is that we don't
   distinguish `union { int i; }' from `typedef union { int i; } U'.
   The first is an anonymous union; the second is just an ordinary
   union type.)  */

void
fixup_anonymous_aggr (tree t)
{
  /* Wipe out memory of synthesized methods.  */
  TYPE_HAS_USER_CONSTRUCTOR (t) = 0;
  TYPE_HAS_DEFAULT_CONSTRUCTOR (t) = 0;
  TYPE_HAS_COPY_CTOR (t) = 0;
  TYPE_HAS_CONST_COPY_CTOR (t) = 0;
  TYPE_HAS_COPY_ASSIGN (t) = 0;
  TYPE_HAS_CONST_COPY_ASSIGN (t) = 0;

  /* Splice the implicitly generated functions out of TYPE_FIELDS.
     PREV_P chases the chain pointer so removal is O(1) per node.  */
  for (tree probe, *prev_p = &TYPE_FIELDS (t); (probe = *prev_p);)
    if (TREE_CODE (probe) == FUNCTION_DECL && DECL_ARTIFICIAL (probe))
      *prev_p = DECL_CHAIN (probe);
    else
      prev_p = &DECL_CHAIN (probe);

  /* Anonymous aggregates cannot have fields with ctors, dtors or complex
     assignment operators (because they cannot have these methods
     themselves).  For anonymous unions this is already checked because
     they are not allowed in any union, otherwise we have to check it.  */
  if (TREE_CODE (t) != UNION_TYPE)
    {
      tree field, type;

      for (field = TYPE_FIELDS (t); field; field = DECL_CHAIN (field))
	if (TREE_CODE (field) == FIELD_DECL)
	  {
	    type = TREE_TYPE (field);
	    if (CLASS_TYPE_P (type))
	      {
		if (TYPE_NEEDS_CONSTRUCTING (type))
		  error ("member %q+#D with constructor not allowed "
			 "in anonymous aggregate", field);
		if (TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type))
		  error ("member %q+#D with destructor not allowed "
			 "in anonymous aggregate", field);
		if (TYPE_HAS_COMPLEX_COPY_ASSIGN (type))
		  error ("member %q+#D with copy assignment operator "
			 "not allowed in anonymous aggregate", field);
	      }
	  }
    }
}

/* Warn for an attribute located at LOCATION that appertains to the
   class type CLASS_TYPE that has not been properly placed after its
   class-key, in it class-specifier.  */

void
warn_misplaced_attr_for_class_type (location_t location,
				    tree class_type)
{
  gcc_assert (OVERLOAD_TYPE_P (class_type));

  /* Group the warning and the follow-on note as one diagnostic.  */
  auto_diagnostic_group d;
  if (warning_at (location, OPT_Wattributes, "attribute ignored in "
		  "declaration " "of %q#T", class_type))
    inform (location, "attribute for %q#T must follow the %qs keyword",
	    class_type, class_key_or_enum_as_string (class_type));
}

/* Returns the cv-qualifiers that apply to the type specified by the
   DECLSPECS.
*/

static int
get_type_quals (const cp_decl_specifier_seq *declspecs)
{
  /* Accumulate TYPE_QUAL_* bits for each cv-qualifier present.  */
  int type_quals = TYPE_UNQUALIFIED;

  if (decl_spec_seq_has_spec_p (declspecs, ds_const))
    type_quals |= TYPE_QUAL_CONST;
  if (decl_spec_seq_has_spec_p (declspecs, ds_volatile))
    type_quals |= TYPE_QUAL_VOLATILE;
  if (decl_spec_seq_has_spec_p (declspecs, ds_restrict))
    type_quals |= TYPE_QUAL_RESTRICT;

  return type_quals;
}

/* Make sure that a declaration with no declarator is well-formed, i.e.
   just declares a tagged type or anonymous union.

   Returns the type declared; or NULL_TREE if none.  */

tree
check_tag_decl (cp_decl_specifier_seq *declspecs,
		bool explicit_type_instantiation_p)
{
  int saw_friend = decl_spec_seq_has_spec_p (declspecs, ds_friend);
  int saw_typedef = decl_spec_seq_has_spec_p (declspecs, ds_typedef);
  /* If a class, struct, or enum type is declared by the DECLSPECS
     (i.e, if a class-specifier, enum-specifier, or non-typename
     elaborated-type-specifier appears in the DECLSPECS),
     DECLARED_TYPE is set to the corresponding type.  */
  tree declared_type = NULL_TREE;
  bool error_p = false;

  if (declspecs->multiple_types_p)
    error_at (smallest_type_location (declspecs),
	      "multiple types in one declaration");
  else if (declspecs->redefined_builtin_type)
    {
      location_t loc = declspecs->locations[ds_redefined_builtin_type_spec];
      if (!in_system_header_at (loc))
	permerror (loc, "redeclaration of C++ built-in type %qT",
		   declspecs->redefined_builtin_type);
      return NULL_TREE;
    }

  if (declspecs->type
      && TYPE_P (declspecs->type)
      && ((TREE_CODE (declspecs->type) != TYPENAME_TYPE
	   && MAYBE_CLASS_TYPE_P (declspecs->type))
	  || TREE_CODE (declspecs->type) == ENUMERAL_TYPE))
    declared_type = declspecs->type;
  else if (declspecs->type == error_mark_node)
    error_p = true;

  if (type_uses_auto (declared_type))
    {
      error_at (declspecs->locations[ds_type_spec],
		"%<auto%> can only be specified for variables "
		"or function declarations");
      return error_mark_node;
    }

  if (declared_type && !OVERLOAD_TYPE_P (declared_type))
    declared_type = NULL_TREE;

  if (!declared_type && !saw_friend && !error_p)
    permerror (input_location, "declaration does not declare anything");
  /* Check for an anonymous union.  */
  else if (declared_type && RECORD_OR_UNION_CODE_P (TREE_CODE (declared_type))
	   && TYPE_UNNAMED_P (declared_type))
    {
      /* 7/3 In a simple-declaration, the optional init-declarator-list
	 can be omitted only when declaring a class (clause 9) or
	 enumeration (7.2), that is, when the decl-specifier-seq contains
	 either a class-specifier, an elaborated-type-specifier with
	 a class-key (9.1), or an enum-specifier.  In these cases and
	 whenever a class-specifier or enum-specifier is present in the
	 decl-specifier-seq, the identifiers in these specifiers are among
	 the names being declared by the declaration (as class-name,
	 enum-names, or enumerators, depending on the syntax).  In such
	 cases, and except for the declaration of an unnamed bit-field
	 (9.6), the decl-specifier-seq shall introduce one or more names
	 into the program, or shall redeclare a name introduced by a
	 previous declaration.  [Example:
	     enum { };		// ill-formed
	     typedef class { };	// ill-formed
	 --end example]  */
      if (saw_typedef)
	{
	  error_at (declspecs->locations[ds_typedef],
		    "missing type-name in typedef-declaration");
	  return NULL_TREE;
	}
      /* Anonymous unions are objects, so they can have specifiers.  */;
      SET_ANON_AGGR_TYPE_P (declared_type);

      if (TREE_CODE (declared_type) != UNION_TYPE)
	pedwarn (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (declared_type)),
		 OPT_Wpedantic, "ISO C++ prohibits anonymous structs");
    }

  else
    {
      /* A named type with no declarator: diagnose any specifier that is
	 meaningless on a pure type declaration, most specific first.  */
      if (decl_spec_seq_has_spec_p (declspecs, ds_inline))
	error_at (declspecs->locations[ds_inline],
		  "%<inline%> can only be specified for functions");
      else if (decl_spec_seq_has_spec_p (declspecs, ds_virtual))
	error_at (declspecs->locations[ds_virtual],
		  "%<virtual%> can only be specified for functions");
      else if (saw_friend
	       && (!current_class_type
		   || current_scope () != current_class_type))
	error_at (declspecs->locations[ds_friend],
		  "%<friend%> can only be specified inside a class");
      else if (decl_spec_seq_has_spec_p (declspecs, ds_explicit))
	error_at (declspecs->locations[ds_explicit],
		  "%<explicit%> can only be specified for constructors");
      else if (declspecs->storage_class)
	error_at (declspecs->locations[ds_storage_class],
		  "a storage class can only be specified for objects "
		  "and functions");
      else if (decl_spec_seq_has_spec_p (declspecs, ds_const))
	error_at (declspecs->locations[ds_const],
		  "%<const%> can only be specified for objects and "
		  "functions");
      else if (decl_spec_seq_has_spec_p (declspecs, ds_volatile))
	error_at (declspecs->locations[ds_volatile],
		  "%<volatile%> can only be specified for objects and "
		  "functions");
      else if (decl_spec_seq_has_spec_p (declspecs, ds_restrict))
	error_at (declspecs->locations[ds_restrict],
		  "%<__restrict%> can only be specified for objects and "
		  "functions");
      else if (decl_spec_seq_has_spec_p (declspecs, ds_thread))
	error_at (declspecs->locations[ds_thread],
		  "%<__thread%> can only be specified for objects "
		  "and functions");
      else if (saw_typedef)
	warning_at (declspecs->locations[ds_typedef], 0,
		    "%<typedef%> was ignored in this declaration");
      else if (decl_spec_seq_has_spec_p (declspecs, ds_constexpr))
	error_at (declspecs->locations[ds_constexpr],
		  "%qs cannot be used for type declarations", "constexpr");
      else if (decl_spec_seq_has_spec_p (declspecs, ds_constinit))
	error_at (declspecs->locations[ds_constinit],
		  "%qs cannot be used for type declarations", "constinit");
      else if (decl_spec_seq_has_spec_p (declspecs, ds_consteval))
	error_at (declspecs->locations[ds_consteval],
		  "%qs cannot be used for type declarations", "consteval");
    }

  if (declspecs->attributes && warn_attributes && declared_type)
    {
      location_t loc;
      if (!CLASS_TYPE_P (declared_type)
	  || !CLASSTYPE_TEMPLATE_INSTANTIATION (declared_type))
	/* For a non-template class, use the name location.  */
	loc = location_of (declared_type);
      else
	/* For a template class (an explicit instantiation), use the
	   current location.  */
	loc = input_location;

      if (explicit_type_instantiation_p)
	/* [dcl.attr.grammar]/4:

	       No attribute-specifier-seq shall appertain to an explicit
	       instantiation.  */
	{
	  if (warning_at (loc, OPT_Wattributes,
			  "attribute ignored in explicit instantiation %q#T",
			  declared_type))
	    inform (loc,
		    "no attribute can be applied to "
		    "an explicit instantiation");
	}
      else
	warn_misplaced_attr_for_class_type (loc, declared_type);
    }

  return declared_type;
}

/* Called when a declaration is seen that contains no names to declare.
   If its type is a reference to a structure, union or enum inherited
   from a containing scope, shadow that tag name for the current scope
   with a forward reference.
   If its type defines a new named structure or union
   or defines an enum, it is valid but we need not do anything here.
   Otherwise, it is an error.
C++: may have to grok the declspecs to learn about static, complain
   for anonymous unions.

   Returns the TYPE declared -- or NULL_TREE if none.  */

tree
shadow_tag (cp_decl_specifier_seq *declspecs)
{
  tree t = check_tag_decl (declspecs,
			   /*explicit_type_instantiation_p=*/false);

  if (!t)
    return NULL_TREE;

  if (maybe_process_partial_specialization (t) == error_mark_node)
    return NULL_TREE;

  /* This is where the variables in an anonymous union are
     declared.  An anonymous union declaration looks like:
     union { ... } ;
     because there is no declarator after the union, the parser
     sends that declaration here.  */
  if (ANON_AGGR_TYPE_P (t))
    {
      fixup_anonymous_aggr (t);

      if (TYPE_FIELDS (t))
	{
	  tree decl = grokdeclarator (/*declarator=*/NULL,
				      declspecs, NORMAL, 0, NULL);
	  finish_anon_union (decl);
	}
    }

  return t;
}

/* Decode a "typename", such as "int **", returning a ..._TYPE node.  */

tree
groktypename (cp_decl_specifier_seq *type_specifiers,
	      const cp_declarator *declarator,
	      bool is_template_arg)
{
  tree attrs;
  tree type;
  /* IS_TEMPLATE_ARG selects the template-argument parsing context.  */
  enum decl_context context
    = is_template_arg ? TEMPLATE_TYPE_ARG : TYPENAME;
  attrs = type_specifiers->attributes;
  type_specifiers->attributes = NULL_TREE;
  type = grokdeclarator (declarator, type_specifiers, context, 0, &attrs);
  if (attrs && type != error_mark_node)
    {
      if (CLASS_TYPE_P (type))
	warning (OPT_Wattributes, "ignoring attributes applied to class type %qT "
		 "outside of definition", type);
      else if (MAYBE_CLASS_TYPE_P (type))
	/* A template type parameter or other dependent type.  */
	warning (OPT_Wattributes, "ignoring attributes applied to dependent "
		 "type %qT without an associated declaration", type);
      else
	cplus_decl_attributes (&type, attrs, 0);
    }

  return type;
}

/* Process a DECLARATOR for a function-scope or namespace-scope variable
   or function declaration.
   (Function definitions go through start_function; class member
   declarations appearing in the body of the class go through
   grokfield.)  The DECL corresponding to the DECLARATOR is returned.
If an error occurs, the error_mark_node is returned instead.

   DECLSPECS are the decl-specifiers for the declaration.  INITIALIZED is
   SD_INITIALIZED if an explicit initializer is present, or SD_DEFAULTED
   for an explicitly defaulted function, or SD_DELETED for an explicitly
   deleted function, but 0 (SD_UNINITIALIZED) if this is a variable
   implicitly initialized via a default constructor.  It can also be
   SD_DECOMPOSITION which behaves much like SD_INITIALIZED, but we also
   mark the new decl as DECL_DECOMPOSITION_P.

   ATTRIBUTES and PREFIX_ATTRIBUTES are GNU attributes associated with
   this declaration.

   The scope represented by the context of the returned DECL is pushed
   (if it is not the global namespace) and is assigned to
   *PUSHED_SCOPE_P.  The caller is then responsible for calling
   pop_scope on *PUSHED_SCOPE_P if it is set.  */

tree
start_decl (const cp_declarator *declarator,
	    cp_decl_specifier_seq *declspecs,
	    int initialized,
	    tree attributes,
	    tree prefix_attributes,
	    tree *pushed_scope_p)
{
  tree decl;
  tree context;
  bool was_public;
  int flags;
  bool alias;

  *pushed_scope_p = NULL_TREE;

  attributes = chainon (attributes, prefix_attributes);

  decl = grokdeclarator (declarator, declspecs, NORMAL, initialized,
			 &attributes);

  if (decl == NULL_TREE || VOID_TYPE_P (decl)
      || decl == error_mark_node)
    return error_mark_node;

  context = CP_DECL_CONTEXT (decl);
  if (context != global_namespace)
    *pushed_scope_p = push_scope (context);

  /* A typedef cannot take an initializer.  */
  if (initialized && TREE_CODE (decl) == TYPE_DECL)
    {
      error_at (DECL_SOURCE_LOCATION (decl),
		"typedef %qD is initialized (use %qs instead)",
		decl, "decltype");
      return error_mark_node;
    }

  if (initialized)
    {
      /* An initialized extern declaration is a definition.  */
      if (! toplevel_bindings_p () && DECL_EXTERNAL (decl))
	warning (0, "declaration of %q#D has %<extern%> and is initialized",
		 decl);
      DECL_EXTERNAL (decl) = 0;
      if (toplevel_bindings_p ())
	TREE_STATIC (decl) = 1;
    }
  alias = lookup_attribute ("alias", DECL_ATTRIBUTES (decl)) != 0;

  if (alias && TREE_CODE (decl) == FUNCTION_DECL)
    record_key_method_defined (decl);

  /* If this is a typedef that names the class for linkage purposes
     (7.1.3p8), apply any attributes directly to the type.  */
  if (TREE_CODE (decl) == TYPE_DECL
      && OVERLOAD_TYPE_P (TREE_TYPE (decl))
      && decl == TYPE_NAME (TYPE_MAIN_VARIANT (TREE_TYPE (decl))))
    flags = ATTR_FLAG_TYPE_IN_PLACE;
  else
    flags = 0;

  /* Set attributes here so if duplicate decl, will have proper
     attributes.  */
  cplus_decl_attributes (&decl, attributes, flags);

  /* Dllimported symbols cannot be defined.  Static data members (which
     can be initialized in-class and dllimported) go through grokfield,
     not here, so we don't need to exclude those decls when checking for
     a definition.  */
  if (initialized && DECL_DLLIMPORT_P (decl))
    {
      error_at (DECL_SOURCE_LOCATION (decl),
		"definition of %q#D is marked %<dllimport%>", decl);
      DECL_DLLIMPORT_P (decl) = 0;
    }

  /* If #pragma weak was used, mark the decl weak now.  */
  if (!processing_template_decl && !DECL_DECOMPOSITION_P (decl))
    maybe_apply_pragma_weak (decl);

  if (TREE_CODE (decl) == FUNCTION_DECL
      && DECL_DECLARED_INLINE_P (decl)
      && DECL_UNINLINABLE (decl)
      && lookup_attribute ("noinline", DECL_ATTRIBUTES (decl)))
    warning_at (DECL_SOURCE_LOCATION (decl), 0,
		"inline function %qD given attribute %qs", decl, "noinline");

  /* Out-of-class declaration of a member of class CONTEXT.  */
  if (TYPE_P (context) && COMPLETE_TYPE_P (complete_type (context)))
    {
      bool this_tmpl = (processing_template_decl
			> template_class_depth (context));
      if (VAR_P (decl))
	{
	  /* An out-of-class definition of a static data member: it must
	     match a declaration inside the class.  */
	  tree field = lookup_field (context, DECL_NAME (decl), 0, false);
	  if (field == NULL_TREE
	      || !(VAR_P (field) || variable_template_p (field)))
	    error ("%q+#D is not a static data member of %q#T",
		   decl, context);
	  else if (variable_template_p (field)
		   && (DECL_LANG_SPECIFIC (decl)
		       && DECL_TEMPLATE_SPECIALIZATION (decl)))
	    /* OK, specialization was already checked.  */;
	  else if (variable_template_p (field) && !this_tmpl)
	    {
	      error_at (DECL_SOURCE_LOCATION (decl),
			"non-member-template declaration of %qD", decl);
	      inform (DECL_SOURCE_LOCATION (field), "does not match "
		      "member template declaration here");
	      return error_mark_node;
	    }
	  else
	    {
	      if (variable_template_p (field))
		field = DECL_TEMPLATE_RESULT (field);

	      if (DECL_CONTEXT (field) != context)
		{
		  if (!same_type_p (DECL_CONTEXT (field), context))
		    permerror (input_location,
			       "ISO C++ does not permit %<%T::%D%> "
			       "to be defined as %<%T::%D%>",
			       DECL_CONTEXT (field), DECL_NAME (decl),
			       context, DECL_NAME (decl));
		  DECL_CONTEXT (decl) = DECL_CONTEXT (field);
		}

	      /* Static data member are tricky; an in-class initialization
		 still doesn't provide a definition, so the in-class
		 declaration will have DECL_EXTERNAL set, but will have an
		 initialization.  Thus, duplicate_decls won't warn
		 about this situation, and so we check here.  */
	      if (initialized && DECL_INITIALIZED_IN_CLASS_P (field))
		error ("duplicate initialization of %qD", decl);

	      field = duplicate_decls (decl, field);
	      if (field == error_mark_node)
		return error_mark_node;
	      else if (field)
		decl = field;
	    }
	}
      else
	{
	  tree field = check_classfn (context, decl,
				      this_tmpl
				      ? current_template_parms
				      : NULL_TREE);
	  if (field && field != error_mark_node
	      && duplicate_decls (decl, field))
	    decl = field;
	}

      /* cp_finish_decl sets DECL_EXTERNAL if DECL_IN_AGGR_P is set.  */
      DECL_IN_AGGR_P (decl) = 0;
      /* Do not mark DECL as an explicit specialization if it was not
	 already marked as an instantiation; a declaration should
	 never be marked as a specialization unless we know what
	 template is being specialized.  */
      if (DECL_LANG_SPECIFIC (decl) && DECL_USE_TEMPLATE (decl))
	{
	  SET_DECL_TEMPLATE_SPECIALIZATION (decl);
	  if (TREE_CODE (decl) == FUNCTION_DECL)
	    DECL_COMDAT (decl) = (TREE_PUBLIC (decl)
				  && DECL_DECLARED_INLINE_P (decl));
	  else
	    DECL_COMDAT (decl) = false;

	  /* [temp.expl.spec] An explicit specialization of a static data
	     member of a template is a definition if the declaration
	     includes an initializer; otherwise, it is a declaration.

	     We check for processing_specialization so this only applies
	     to the new specialization syntax.  */
	  if (!initialized && processing_specialization)
	    DECL_EXTERNAL (decl) = 1;
	}

      if (DECL_EXTERNAL (decl) && ! DECL_TEMPLATE_SPECIALIZATION (decl)
	  /* Aliases are definitions.  */
	  && !alias)
	permerror (declarator->id_loc,
		   "declaration of %q#D outside of class is not definition",
		   decl);
    }

  /* Create a DECL_LANG_SPECIFIC so that DECL_DECOMPOSITION_P works.  */
  if (initialized == SD_DECOMPOSITION)
    fit_decomposition_lang_decl (decl, NULL_TREE);

  was_public = TREE_PUBLIC (decl);

  if ((DECL_EXTERNAL (decl) || TREE_CODE (decl) == FUNCTION_DECL)
      && current_function_decl)
    /* A function-scope decl of some namespace-scope decl.  */
    DECL_LOCAL_DECL_P (decl) = true;

  /* Enter this declaration into the symbol table.  Don't push the plain
     VAR_DECL for a variable template.  */
  if (!template_parm_scope_p ()
      || !VAR_P (decl))
    decl = maybe_push_decl (decl);

  if (processing_template_decl)
    decl = push_template_decl (decl);

  if (decl == error_mark_node)
    return error_mark_node;

  if (VAR_P (decl)
      && DECL_NAMESPACE_SCOPE_P (decl) && !TREE_PUBLIC (decl) && !was_public
      && !DECL_THIS_STATIC (decl) && !DECL_ARTIFICIAL (decl))
    {
      /* This is a const variable with implicit 'static'.  Set
	 DECL_THIS_STATIC so we can tell it from variables that are
	 !TREE_PUBLIC because of the anonymous namespace.  */
      gcc_assert (CP_TYPE_CONST_P (TREE_TYPE (decl)) || errorcount);
      DECL_THIS_STATIC (decl) = 1;
    }

  /* Local static/thread_local variables are not permitted inside a
     constexpr or consteval function.  */
  if (current_function_decl && VAR_P (decl)
      && DECL_DECLARED_CONSTEXPR_P (current_function_decl))
    {
      bool ok = false;
      if (CP_DECL_THREAD_LOCAL_P (decl))
	error_at (DECL_SOURCE_LOCATION (decl),
		  "%qD declared %<thread_local%> in %qs function", decl,
		  DECL_IMMEDIATE_FUNCTION_P (current_function_decl)
		  ? "consteval" : "constexpr");
      else if (TREE_STATIC (decl))
	error_at (DECL_SOURCE_LOCATION (decl),
		  "%qD declared %<static%> in %qs function", decl,
		  DECL_IMMEDIATE_FUNCTION_P (current_function_decl)
		  ? "consteval" : "constexpr");
      else
	ok = true;

      if (!ok)
	cp_function_chain->invalid_constexpr = true;
    }

  if (!processing_template_decl && VAR_P (decl))
    start_decl_1 (decl, initialized);

  return decl;
}

/* Process the declaration of a variable DECL.  INITIALIZED is true
   iff DECL is explicitly initialized.  (INITIALIZED is false if the
   variable is initialized via an implicitly-called constructor.)
   This function must be called for ordinary variables (including, for
   example, implicit instantiations of templates), but must not be
   called for template declarations.
*/

void
start_decl_1 (tree decl, bool initialized)
{
  tree type;
  bool complete_p;
  bool aggregate_definition_p;

  gcc_assert (!processing_template_decl);

  if (error_operand_p (decl))
    return;

  gcc_assert (VAR_P (decl));

  type = TREE_TYPE (decl);
  complete_p = COMPLETE_TYPE_P (type);
  aggregate_definition_p = MAYBE_CLASS_TYPE_P (type) && !DECL_EXTERNAL (decl);

  /* If an explicit initializer is present, or if this is a definition
     of an aggregate, then we need a complete type at this point.
     (Scalars are always complete types, so there is nothing to
     check.)  This code just sets COMPLETE_P; errors (if necessary)
     are issued below.  */
  if ((initialized || aggregate_definition_p)
      && !complete_p
      && COMPLETE_TYPE_P (complete_type (type)))
    {
      complete_p = true;
      /* We will not yet have set TREE_READONLY on DECL if the type
	 was "const", but incomplete, before this point.  But, now, we
	 have a complete type, so we can try again.  */
      cp_apply_type_quals_to_decl (cp_type_quals (type), decl);
    }

  if (is_global_var (decl))
    {
      /* Let the target validate the type for static/thread storage.  */
      type_context_kind context = (DECL_THREAD_LOCAL_P (decl)
				   ? TCTX_THREAD_STORAGE
				   : TCTX_STATIC_STORAGE);
      verify_type_context (input_location, context, TREE_TYPE (decl));
    }

  if (initialized)
    /* Is it valid for this decl to have an initializer at all?  */
    {
      /* Don't allow initializations for incomplete types except for
	 arrays which might be completed by the initialization.  */
      if (complete_p)
	;			/* A complete type is ok.  */
      else if (type_uses_auto (type))
	;			/* An auto type is ok.  */
      else if (TREE_CODE (type) != ARRAY_TYPE)
	{
	  error ("variable %q#D has initializer but incomplete type", decl);
	  type = TREE_TYPE (decl) = error_mark_node;
	}
      else if (!COMPLETE_TYPE_P (complete_type (TREE_TYPE (type))))
	{
	  if (DECL_LANG_SPECIFIC (decl) && DECL_TEMPLATE_INFO (decl))
	    error ("elements of array %q#D have incomplete type", decl);
	  /* else we already gave an error in start_decl.  */
	}
    }
  else if (aggregate_definition_p && !complete_p)
    {
      if (type_uses_auto (type))
	gcc_assert (CLASS_PLACEHOLDER_TEMPLATE (type));
      else
	{
	  error ("aggregate %q#D has incomplete type and cannot be defined",
		 decl);
	  /* Change the type so that assemble_variable will give
	     DECL an rtl we can live with: (mem (const_int 0)).  */
	  type = TREE_TYPE (decl) = error_mark_node;
	}
    }

  /* Create a new scope to hold this declaration if necessary.
     Whether or not a new scope is necessary cannot be determined
     until after the type has been completed; if the type is a
     specialization of a class template it is not until after
     instantiation has occurred that TYPE_HAS_NONTRIVIAL_DESTRUCTOR
     will be set correctly.  */
  maybe_push_cleanup_level (type);
}

/* Given a parenthesized list of values INIT, create a CONSTRUCTOR to
   handle C++20 P0960.  TYPE is the type of the object we're
   initializing.  */

tree
do_aggregate_paren_init (tree init, tree type)
{
  tree val = TREE_VALUE (init);

  if (TREE_CHAIN (init) == NULL_TREE)
    {
      /* If the list has a single element and it's a string literal,
	 then it's the initializer for the array as a whole.  */
      if (TREE_CODE (type) == ARRAY_TYPE
	  && char_type_p (TYPE_MAIN_VARIANT (TREE_TYPE (type)))
	  && TREE_CODE (tree_strip_any_location_wrapper (val))
	     == STRING_CST)
	return val;
      /* Handle non-standard extensions like compound literals.  This also
	 prevents triggering aggregate parenthesized-initialization in
	 compiler-generated code for =default.  */
      else if (same_type_ignoring_top_level_qualifiers_p (type,
							  TREE_TYPE (val)))
	return val;
    }

  init = build_constructor_from_list (init_list_type_node, init);
  CONSTRUCTOR_IS_DIRECT_INIT (init) = true;
  CONSTRUCTOR_IS_PAREN_INIT (init) = true;
  return init;
}

/* Handle initialization of references.  DECL, TYPE, and INIT have the
   same meaning as in cp_finish_decl.  *CLEANUP must be NULL on entry,
   but will be set to a new CLEANUP_STMT if a temporary is created
   that must be destroyed subsequently.
Returns an initializer expression to use to initialize DECL, or
   NULL if the initialization can be performed statically.

   Quotes on semantics can be found in ARM 8.4.3.  */

static tree
grok_reference_init (tree decl, tree type, tree init, int flags)
{
  if (init == NULL_TREE)
    {
      if ((DECL_LANG_SPECIFIC (decl) == 0
	   || DECL_IN_AGGR_P (decl) == 0)
	  && ! DECL_THIS_EXTERN (decl))
	error_at (DECL_SOURCE_LOCATION (decl),
		  "%qD declared as reference but not initialized", decl);
      return NULL_TREE;
    }

  /* TTYPE is the type the reference refers to.  */
  tree ttype = TREE_TYPE (type);

  if (TREE_CODE (init) == TREE_LIST)
    {
      /* This handles (C++20 only) code like

	   const A& r(1, 2, 3);

	 where we treat the parenthesized list as a CONSTRUCTOR.  */
      if (TREE_TYPE (init) == NULL_TREE
	  && CP_AGGREGATE_TYPE_P (ttype)
	  && !DECL_DECOMPOSITION_P (decl)
	  && (cxx_dialect >= cxx20))
	{
	  /* We don't know yet if we should treat const A& r(1) as
	     const A& r{1}.  */
	  if (list_length (init) == 1)
	    {
	      flags |= LOOKUP_AGGREGATE_PAREN_INIT;
	      init = build_x_compound_expr_from_list (init, ELK_INIT,
						      tf_warning_or_error);
	    }
	  /* If the list had more than one element, the code is ill-formed
	     pre-C++20, so we can build a constructor right away.  */
	  else
	    init = do_aggregate_paren_init (init, ttype);
	}
      else
	init = build_x_compound_expr_from_list (init, ELK_INIT,
						tf_warning_or_error);
    }

  if (TREE_CODE (ttype) != ARRAY_TYPE
      && TREE_CODE (TREE_TYPE (init)) == ARRAY_TYPE)
    /* Note: default conversion is only called in very special cases.  */
    init = decay_conversion (init, tf_warning_or_error);

  /* check_initializer handles this for non-reference variables, but for
     references we need to do it here or the initializer will get the
     incomplete array type and confuse later calls to
     cp_complete_array_type.  */
  if (TREE_CODE (ttype) == ARRAY_TYPE
      && TYPE_DOMAIN (ttype) == NULL_TREE
      && (BRACE_ENCLOSED_INITIALIZER_P (init)
	  || TREE_CODE (init) == STRING_CST))
    {
      cp_complete_array_type (&ttype, init, false);
      if (ttype != TREE_TYPE (type))
	type = cp_build_reference_type (ttype, TYPE_REF_IS_RVALUE (type));
    }

  /* Convert INIT to the reference type TYPE.  This may involve the
     creation of a temporary, whose lifetime must be the same as that of
     the reference.  If so, a DECL_EXPR for the temporary will be added
     just after the DECL_EXPR for DECL.  That's why we don't set
     DECL_INITIAL for local references (instead assigning to them
     explicitly); we need to allow the temporary to be initialized
     first.  */
  return initialize_reference (type, init, flags, tf_warning_or_error);
}

/* Designated initializers in arrays are not supported in GNU C++.
   The parser cannot detect this error since it does not know whether a
   given brace-enclosed initializer is for a class type or for an array.
   This function checks that CE does not use a designated initializer.
   If it does, an error is issued.  Returns true if CE is valid, i.e.,
   does not have a designated initializer.  */

bool
check_array_designated_initializer (constructor_elt *ce,
				    unsigned HOST_WIDE_INT index)
{
  /* Designated initializers for array elements are not supported.  */
  if (ce->index)
    {
      /* The parser only allows identifiers as designated
	 initializers.  */
      if (ce->index == error_mark_node)
	{
	  error ("name used in a GNU-style designated "
		 "initializer for an array");
	  return false;
	}
      else if (identifier_p (ce->index))
	{
	  error ("name %qD used in a GNU-style designated "
		 "initializer for an array", ce->index);
	  return false;
	}

      tree ce_index = build_expr_type_conversion (WANT_INT | WANT_ENUM,
						  ce->index, true);
      if (ce_index
	  && INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P (TREE_TYPE (ce_index))
	  && (TREE_CODE (ce_index = fold_non_dependent_expr (ce_index))
	      == INTEGER_CST))
	{
	  /* A C99 designator is OK if it matches the current index.  */
	  if (wi::to_wide (ce_index) == index)
	    {
	      ce->index = ce_index;
	      return true;
	    }
	  else
	    sorry ("non-trivial designated initializers not supported");
	}
      else
	error_at (cp_expr_loc_or_input_loc (ce->index),
		  "C99 designator %qE is not an integral constant-expression",
		  ce->index);

      return false;
    }

  return true;
}

/* When parsing `int a[] = {1, 2};' we don't know the size of the
   array until we finish parsing the initializer.  If that's the
   situation we're in, update DECL accordingly.  */

static void
maybe_deduce_size_from_array_init (tree decl, tree init)
{
  tree type = TREE_TYPE (decl);

  if (TREE_CODE (type) == ARRAY_TYPE
      && TYPE_DOMAIN (type) == NULL_TREE
      && TREE_CODE (decl) != TYPE_DECL)
    {
      /* do_default is really a C-ism to deal with tentative definitions.
	 But let's leave it here to ease the eventual merge.  */
      int do_default = !DECL_EXTERNAL (decl);
      tree initializer = init ? init : DECL_INITIAL (decl);
      int failure = 0;

      /* Check that there are no designated initializers in INIT, as
	 those are not supported in GNU C++, and as the middle-end
	 will crash if presented with a non-numeric designated
	 initializer.  */
      if (initializer && BRACE_ENCLOSED_INITIALIZER_P (initializer))
	{
	  vec<constructor_elt, va_gc> *v = CONSTRUCTOR_ELTS (initializer);
	  constructor_elt *ce;
	  HOST_WIDE_INT i;
	  FOR_EACH_VEC_SAFE_ELT (v, i, ce)
	    {
	      if (instantiation_dependent_expression_p (ce->index))
		return;
	      if (!check_array_designated_initializer (ce, i))
		failure = 1;
	    }
	}

      if (failure)
	TREE_TYPE (decl) = error_mark_node;
      else
	{
	  /* FAILURE encodes cp_complete_array_type's result: 1 = size
	     could not be determined, 2 = size missing, 3 = zero size.  */
	  failure = cp_complete_array_type (&TREE_TYPE (decl), initializer,
					    do_default);
	  if (failure == 1)
	    {
	      error_at (cp_expr_loc_or_loc (initializer,
					    DECL_SOURCE_LOCATION (decl)),
			"initializer fails to determine size of %qD",
			decl);
	    }
	  else if (failure == 2)
	    {
	      if (do_default)
		{
		  error_at (DECL_SOURCE_LOCATION (decl),
			    "array size missing in %qD", decl);
		}
	      /* If a `static' var's size isn't known, make it extern as
		 well as static, so it does not get allocated.  If it's
		 not `static', then don't mark it extern;
		 finish_incomplete_decl will give it a default size and
		 it will get allocated.  */
	      else if (!pedantic && TREE_STATIC (decl) && !TREE_PUBLIC (decl))
		DECL_EXTERNAL (decl) = 1;
	    }
	  else if (failure == 3)
	    {
	      error_at (DECL_SOURCE_LOCATION (decl),
			"zero-size array %qD", decl);
	    }
	}

      cp_apply_type_quals_to_decl (cp_type_quals (TREE_TYPE (decl)), decl);

      relayout_decl (decl);
    }
}

/* Set DECL_SIZE, DECL_ALIGN, etc. for DECL (a VAR_DECL), and issue
   any appropriate error messages regarding the layout.  */

static void
layout_var_decl (tree decl)
{
  tree type;

  type = TREE_TYPE (decl);
  if (type == error_mark_node)
    return;

  /* If we haven't already laid out this declaration, do so now.
     Note that we must not call complete type for an external object
     because it's type might involve templates that we are not
     supposed to instantiate yet.  (And it's perfectly valid to say
     `extern X x' for some incomplete type `X'.)  */
  if (!DECL_EXTERNAL (decl))
    complete_type (type);
  if (!DECL_SIZE (decl)
      && TREE_TYPE (decl) != error_mark_node
      && complete_or_array_type_p (type))
    layout_decl (decl, 0);

  if (!DECL_EXTERNAL (decl) && DECL_SIZE (decl) == NULL_TREE)
    {
      /* An automatic variable with an incomplete type: that is an error.
	 Don't talk about array types here, since we took care of that
	 message in grokdeclarator.  */
      error_at (DECL_SOURCE_LOCATION (decl),
		"storage size of %qD isn%'t known", decl);
      TREE_TYPE (decl) = error_mark_node;
    }
#if 0
  /* Keep this code around in case we later want to control debug info
     based on whether a type is "used".  (jason 1999-11-11) */

  else if (!DECL_EXTERNAL (decl) && MAYBE_CLASS_TYPE_P (ttype))
    /* Let debugger know it should output info for this type.  */
    note_debug_info_needed (ttype);

  if (TREE_STATIC (decl) && DECL_CLASS_SCOPE_P (decl))
    note_debug_info_needed (DECL_CONTEXT (decl));
#endif

  if ((DECL_EXTERNAL (decl) || TREE_STATIC (decl))
      && DECL_SIZE (decl) != NULL_TREE
      && !
TREE_CONSTANT (DECL_SIZE (decl))) { if (TREE_CODE (DECL_SIZE (decl)) == INTEGER_CST && !DECL_LOCAL_DECL_P (decl)) constant_expression_warning (DECL_SIZE (decl)); else { error_at (DECL_SOURCE_LOCATION (decl), "storage size of %qD isn%'t constant", decl); TREE_TYPE (decl) = error_mark_node; } } } /* If a local static variable is declared in an inline function, or if we have a weak definition, we must endeavor to create only one instance of the variable at link-time. */ void maybe_commonize_var (tree decl) { /* Don't mess with __FUNCTION__ and similar. */ if (DECL_ARTIFICIAL (decl)) return; /* Static data in a function with comdat linkage also has comdat linkage. */ if ((TREE_STATIC (decl) && DECL_FUNCTION_SCOPE_P (decl) && vague_linkage_p (DECL_CONTEXT (decl))) || (TREE_PUBLIC (decl) && DECL_INLINE_VAR_P (decl))) { if (flag_weak) { /* With weak symbols, we simply make the variable COMDAT; that will cause copies in multiple translations units to be merged. */ comdat_linkage (decl); } else { if (DECL_INITIAL (decl) == NULL_TREE || DECL_INITIAL (decl) == error_mark_node) { /* Without weak symbols, we can use COMMON to merge uninitialized variables. */ TREE_PUBLIC (decl) = 1; DECL_COMMON (decl) = 1; } else { /* While for initialized variables, we must use internal linkage -- which means that multiple copies will not be merged. */ TREE_PUBLIC (decl) = 0; DECL_COMMON (decl) = 0; DECL_INTERFACE_KNOWN (decl) = 1; const char *msg; if (DECL_INLINE_VAR_P (decl)) msg = G_("sorry: semantics of inline variable " "%q#D are wrong (you%'ll wind up with " "multiple copies)"); else msg = G_("sorry: semantics of inline function " "static data %q#D are wrong (you%'ll wind " "up with multiple copies)"); if (warning_at (DECL_SOURCE_LOCATION (decl), 0, msg, decl)) inform (DECL_SOURCE_LOCATION (decl), "you can work around this by removing the initializer"); } } } } /* Issue an error message if DECL is an uninitialized const variable. 
   CONSTEXPR_CONTEXT_P is true when the function is called in a constexpr
   context from potential_constant_expression.  Returns true if all is
   well, false otherwise.  */

bool
check_for_uninitialized_const_var (tree decl, bool constexpr_context_p,
				   tsubst_flags_t complain)
{
  tree type = strip_array_types (TREE_TYPE (decl));

  /* ``Unless explicitly declared extern, a const object does not have
     external linkage and must be initialized. ($8.4; $12.1)'' ARM
     7.1.6 */
  if (VAR_P (decl)
      && !TYPE_REF_P (type)
      && (CP_TYPE_CONST_P (type)
	  /* C++20 permits trivial default initialization in constexpr
	     context (P1331R2).  */
	  || (cxx_dialect < cxx20
	      && (constexpr_context_p
		  || var_in_constexpr_fn (decl))))
      && !DECL_NONTRIVIALLY_INITIALIZED_P (decl))
    {
      /* FIELD, if set, is a subobject that default-initialization
	 would leave uninitialized; no FIELD means nothing to report.  */
      tree field = default_init_uninitialized_part (type);
      if (!field)
	return true;

      bool show_notes = true;

      if (!constexpr_context_p || cxx_dialect >= cxx20)
	{
	  if (CP_TYPE_CONST_P (type))
	    {
	      if (complain & tf_error)
		show_notes = permerror (DECL_SOURCE_LOCATION (decl),
					"uninitialized %<const %D%>", decl);
	    }
	  else
	    {
	      if (!is_instantiation_of_constexpr (current_function_decl)
		  && (complain & tf_error))
		error_at (DECL_SOURCE_LOCATION (decl),
			  "uninitialized variable %qD in %<constexpr%> "
			  "function", decl);
	      else
		show_notes = false;
	      cp_function_chain->invalid_constexpr = true;
	    }
	}
      else if (complain & tf_error)
	error_at (DECL_SOURCE_LOCATION (decl),
		  "uninitialized variable %qD in %<constexpr%> context",
		  decl);

      /* Explain which subobject makes the class non-default-initializable.  */
      if (show_notes && CLASS_TYPE_P (type) && (complain & tf_error))
	{
	  tree defaulted_ctor;

	  inform (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (type)),
		  "%q#T has no user-provided default constructor", type);
	  defaulted_ctor = in_class_defaulted_default_constructor (type);
	  if (defaulted_ctor)
	    inform (DECL_SOURCE_LOCATION (defaulted_ctor),
		    "constructor is not user-provided because it is "
		    "explicitly defaulted in the class body");
	  inform (DECL_SOURCE_LOCATION (field),
		  "and the implicitly-defined constructor does not "
		  "initialize %q#D", field);
	}

      return false;
    }

  return true;
}

/* Structure holding the current initializer being processed by reshape_init.
   CUR is a pointer to the current element being processed, END is a pointer
   after the last element present in the initializer.  */
struct reshape_iter
{
  constructor_elt *cur;
  constructor_elt *end;
};

static tree reshape_init_r (tree, reshape_iter *, tree, tsubst_flags_t);

/* FIELD is a FIELD_DECL or NULL.  In the former case, the value
   returned is the next FIELD_DECL (possibly FIELD itself) that can be
   initialized.  If there are no more such fields, the return value
   will be NULL.  */

tree
next_initializable_field (tree field)
{
  /* Skip anything that is not an initializable FIELD_DECL: non-field
     decls, unnamed bit-fields, and (most) artificial fields.  */
  while (field
	 && (TREE_CODE (field) != FIELD_DECL
	     || DECL_UNNAMED_BIT_FIELD (field)
	     || (DECL_ARTIFICIAL (field)
		 /* In C++17, don't skip base class fields.  */
		 && !(cxx_dialect >= cxx17 && DECL_FIELD_IS_BASE (field))
		 /* Don't skip vptr fields.  We might see them when we're
		    called from reduced_constant_expression_p.  */
		 && !DECL_VIRTUAL_P (field))))
    field = DECL_CHAIN (field);

  return field;
}

/* Return true for [dcl.init.list] direct-list-initialization from
   single element of enumeration with a fixed underlying type.  */

bool
is_direct_enum_init (tree type, tree init)
{
  if (cxx_dialect >= cxx17
      && TREE_CODE (type) == ENUMERAL_TYPE
      && ENUM_FIXED_UNDERLYING_TYPE_P (type)
      && TREE_CODE (init) == CONSTRUCTOR
      && CONSTRUCTOR_IS_DIRECT_INIT (init)
      && CONSTRUCTOR_NELTS (init) == 1)
    return true;
  return false;
}

/* Subroutine of reshape_init_array and reshape_init_vector, which does
   the actual work.  ELT_TYPE is the element type of the array.  MAX_INDEX
   is an INTEGER_CST representing the size of the array minus one (the
   maximum index), or NULL_TREE if the array was declared without
   specifying the size.  D is the iterator within the constructor.
 */

static tree
reshape_init_array_1 (tree elt_type, tree max_index, reshape_iter *d,
		      tree first_initializer_p, tsubst_flags_t complain)
{
  tree new_init;
  bool sized_array_p = (max_index && TREE_CONSTANT (max_index));
  unsigned HOST_WIDE_INT max_index_cst = 0;
  unsigned HOST_WIDE_INT index;

  /* The initializer for an array is always a CONSTRUCTOR.  If this is the
     outermost CONSTRUCTOR and the element type is non-aggregate, we don't
     need to build a new one.  But don't reuse if not complaining; if this
     is tentative, we might also reshape to another type (95319).  */
  bool reuse = (first_initializer_p
		&& (complain & tf_error)
		&& !CP_AGGREGATE_TYPE_P (elt_type)
		&& !TREE_SIDE_EFFECTS (first_initializer_p));
  if (reuse)
    new_init = first_initializer_p;
  else
    new_init = build_constructor (init_list_type_node, NULL);

  if (sized_array_p)
    {
      /* Minus 1 is used for zero sized arrays.  */
      if (integer_all_onesp (max_index))
	return new_init;

      if (tree_fits_uhwi_p (max_index))
	max_index_cst = tree_to_uhwi (max_index);
      /* sizetype is sign extended, not zero extended.  */
      else
	max_index_cst = tree_to_uhwi (fold_convert (size_type_node,
						    max_index));
    }

  /* Loop until there are no more initializers.  */
  for (index = 0;
       d->cur != d->end && (!sized_array_p || index <= max_index_cst);
       ++index)
    {
      tree elt_init;
      constructor_elt *old_cur = d->cur;

      check_array_designated_initializer (d->cur, index);
      elt_init = reshape_init_r (elt_type, d,
				 /*first_initializer_p=*/NULL_TREE,
				 complain);
      if (elt_init == error_mark_node)
	return error_mark_node;
      tree idx = size_int (index);
      if (reuse)
	{
	  /* Rewrite the reused CONSTRUCTOR element in place.  */
	  old_cur->index = idx;
	  old_cur->value = elt_init;
	}
      else
	CONSTRUCTOR_APPEND_ELT (CONSTRUCTOR_ELTS (new_init),
				idx, elt_init);
      if (!TREE_CONSTANT (elt_init))
	TREE_CONSTANT (new_init) = false;

      /* This can happen with an invalid initializer (c++/54501).  */
      if (d->cur == old_cur && !sized_array_p)
	break;
    }

  return new_init;
}

/* Subroutine of reshape_init_r, processes the initializers for arrays.
   Parameters are the same of reshape_init_r.  */

static tree
reshape_init_array (tree type, reshape_iter *d, tree first_initializer_p,
		    tsubst_flags_t complain)
{
  tree max_index = NULL_TREE;

  gcc_assert (TREE_CODE (type) == ARRAY_TYPE);

  if (TYPE_DOMAIN (type))
    max_index = array_type_nelts (type);

  return reshape_init_array_1 (TREE_TYPE (type), max_index, d,
			       first_initializer_p, complain);
}

/* Subroutine of reshape_init_r, processes the initializers for vectors.
   Parameters are the same of reshape_init_r.  */

static tree
reshape_init_vector (tree type, reshape_iter *d, tsubst_flags_t complain)
{
  tree max_index = NULL_TREE;

  gcc_assert (VECTOR_TYPE_P (type));

  if (COMPOUND_LITERAL_P (d->cur->value))
    {
      /* A compound literal of matching type is taken as-is.  */
      tree value = d->cur->value;
      if (!same_type_p (TREE_TYPE (value), type))
	{
	  if (complain & tf_error)
	    error ("invalid type %qT as initializer for a vector of type %qT",
		   TREE_TYPE (d->cur->value), type);
	  value = error_mark_node;
	}
      ++d->cur;
      return value;
    }

  /* For a vector, we initialize it as an array of the appropriate size.  */
  if (VECTOR_TYPE_P (type))
    max_index = size_int (TYPE_VECTOR_SUBPARTS (type) - 1);

  return reshape_init_array_1 (TREE_TYPE (type), max_index, d,
			       NULL_TREE, complain);
}

/* Subroutine of reshape_init_r, processes the initializers for classes
   or union.  Parameters are the same of reshape_init_r.  */

static tree
reshape_init_class (tree type, reshape_iter *d, bool first_initializer_p,
		    tsubst_flags_t complain)
{
  tree field;
  tree new_init;

  gcc_assert (CLASS_TYPE_P (type));

  /* The initializer for a class is always a CONSTRUCTOR.  */
  new_init = build_constructor (init_list_type_node, NULL);

  int binfo_idx = -1;
  tree binfo = TYPE_BINFO (type);
  tree base_binfo = NULL_TREE;
  if (cxx_dialect >= cxx17 && uses_template_parms (type))
    {
      /* We get here from maybe_aggr_guide for C++20 class template argument
	 deduction.  In this case we need to look through the binfo because a
	 template doesn't have base fields.  */
      binfo_idx = 0;
      BINFO_BASE_ITERATE (binfo, binfo_idx, base_binfo);
    }
  if (base_binfo)
    field = base_binfo;
  else
    field = next_initializable_field (TYPE_FIELDS (type));

  if (!field)
    {
      /* [dcl.init.aggr]

	 An initializer for an aggregate member that is an
	 empty class shall have the form of an empty
	 initializer-list {}.  */
      if (!first_initializer_p)
	{
	  if (complain & tf_error)
	    error ("initializer for %qT must be brace-enclosed", type);
	  return error_mark_node;
	}

      return new_init;
    }

  /* For C++20 CTAD, handle pack expansions in the base list.  */
  tree last_was_pack_expansion = NULL_TREE;

  /* Loop through the initializable fields, gathering initializers.  */
  while (d->cur != d->end)
    {
      tree field_init;
      constructor_elt *old_cur = d->cur;

      /* Handle designated initializers, as an extension.  */
      if (d->cur->index)
	{
	  if (d->cur->index == error_mark_node)
	    return error_mark_node;

	  if (TREE_CODE (d->cur->index) == FIELD_DECL)
	    {
	      /* We already reshaped this.  */
	      if (field != d->cur->index)
		{
		  tree id = DECL_NAME (d->cur->index);
		  gcc_assert (id);
		  gcc_checking_assert (d->cur->index
				       == get_class_binding (type, id));
		  field = d->cur->index;
		}
	    }
	  else if (TREE_CODE (d->cur->index) == IDENTIFIER_NODE)
	    field = get_class_binding (type, d->cur->index);
	  else
	    {
	      if (complain & tf_error)
		error ("%<[%E] =%> used in a GNU-style designated initializer"
		       " for class %qT", d->cur->index, type);
	      return error_mark_node;
	    }

	  if (!field || TREE_CODE (field) != FIELD_DECL)
	    {
	      if (complain & tf_error)
		error ("%qT has no non-static data member named %qD", type,
		       d->cur->index);
	      return error_mark_node;
	    }
	}

      /* If we processed all the member of the class, we are done.  */
      if (!field)
	break;

      last_was_pack_expansion = (PACK_EXPANSION_P (TREE_TYPE (field))
				 ? field : NULL_TREE);
      if (last_was_pack_expansion)
	/* Each non-trailing aggregate element that is a pack expansion is
	   assumed to correspond to no elements of the initializer list.  */
	goto continue_;

      field_init = reshape_init_r (TREE_TYPE (field), d,
				   /*first_initializer_p=*/NULL_TREE,
				   complain);
      if (field_init == error_mark_node)
	return error_mark_node;

      if (d->cur == old_cur && d->cur->index)
	{
	  /* This can happen with an invalid initializer for a flexible
	     array member (c++/54441).  */
	  if (complain & tf_error)
	    error ("invalid initializer for %q#D", field);
	  return error_mark_node;
	}

      CONSTRUCTOR_APPEND_ELT (CONSTRUCTOR_ELTS (new_init), field, field_init);

      /* [dcl.init.aggr]

	 When a union is initialized with a brace-enclosed
	 initializer, the braces shall only contain an
	 initializer for the first member of the union.  */
      if (TREE_CODE (type) == UNION_TYPE)
	break;

    continue_:
      /* Advance to the next initializable field (or base, for CTAD).  */
      if (base_binfo)
	{
	  BINFO_BASE_ITERATE (binfo, ++binfo_idx, base_binfo);
	  if (base_binfo)
	    field = base_binfo;
	  else
	    field = next_initializable_field (TYPE_FIELDS (type));
	}
      else
	field = next_initializable_field (DECL_CHAIN (field));
    }

  /* A trailing aggregate element that is a pack expansion is assumed to
     correspond to all remaining elements of the initializer list (if
     any).  */
  if (last_was_pack_expansion)
    {
      CONSTRUCTOR_APPEND_ELT (CONSTRUCTOR_ELTS (new_init),
			      last_was_pack_expansion, d->cur->value);
      while (d->cur != d->end)
	d->cur++;
    }

  return new_init;
}

/* Subroutine of reshape_init_r.  We're in a context where C99 initializer
   designators are not valid; either complain or return true to indicate
   that reshape_init_r should return error_mark_node.  */

static bool
has_designator_problem (reshape_iter *d, tsubst_flags_t complain)
{
  if (d->cur->index)
    {
      if (complain & tf_error)
	error_at (cp_expr_loc_or_input_loc (d->cur->index),
		  "C99 designator %qE outside aggregate initializer",
		  d->cur->index);
      else
	return true;
    }
  return false;
}

/* Subroutine of reshape_init, which processes a single initializer
   (part of a CONSTRUCTOR).  TYPE is the type of the variable being
   initialized, D is the iterator within the CONSTRUCTOR which points
   to the initializer to process.
   If this is the first initializer of the outermost CONSTRUCTOR node,
   FIRST_INITIALIZER_P is that CONSTRUCTOR; otherwise, it is NULL_TREE.  */

static tree
reshape_init_r (tree type, reshape_iter *d, tree first_initializer_p,
		tsubst_flags_t complain)
{
  tree init = d->cur->value;

  if (error_operand_p (init))
    return error_mark_node;

  if (first_initializer_p && !CP_AGGREGATE_TYPE_P (type)
      && has_designator_problem (d, complain))
    return error_mark_node;

  tree stripped_init = tree_strip_any_location_wrapper (init);

  if (TREE_CODE (type) == COMPLEX_TYPE)
    {
      /* A complex type can be initialized from one or two initializers,
	 but braces are not elided.  */
      d->cur++;
      if (BRACE_ENCLOSED_INITIALIZER_P (stripped_init))
	{
	  if (CONSTRUCTOR_NELTS (stripped_init) > 2)
	    {
	      if (complain & tf_error)
		error ("too many initializers for %qT", type);
	      else
		return error_mark_node;
	    }
	}
      else if (first_initializer_p && d->cur != d->end)
	{
	  /* Wrap the two bare initializers (real, imaginary) in a
	     CONSTRUCTOR.  */
	  vec<constructor_elt, va_gc> *v = 0;
	  CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init);
	  CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, d->cur->value);
	  if (has_designator_problem (d, complain))
	    return error_mark_node;
	  d->cur++;
	  init = build_constructor (init_list_type_node, v);
	}
      return init;
    }

  /* A non-aggregate type is always initialized with a single
     initializer.  */
  if (!CP_AGGREGATE_TYPE_P (type)
      /* As is an array with dependent bound.  */
      || (cxx_dialect >= cxx20
	  && TREE_CODE (type) == ARRAY_TYPE
	  && uses_template_parms (TYPE_DOMAIN (type))))
    {
      /* It is invalid to initialize a non-aggregate type with a
	 brace-enclosed initializer before C++0x.
	 We need to check for BRACE_ENCLOSED_INITIALIZER_P here because
	 of g++.old-deja/g++.mike/p7626.C: a pointer-to-member constant is
	 a CONSTRUCTOR (with a record type).  */
      if (TREE_CODE (stripped_init) == CONSTRUCTOR
	  /* Don't complain about a capture-init.  */
	  && !CONSTRUCTOR_IS_DIRECT_INIT (stripped_init)
	  && BRACE_ENCLOSED_INITIALIZER_P (stripped_init)) /* p7626.C */
	{
	  if (SCALAR_TYPE_P (type))
	    {
	      if (cxx_dialect < cxx11)
		{
		  if (complain & tf_error)
		    error ("braces around scalar initializer for type %qT",
			   type);
		  init = error_mark_node;
		}
	      else if (first_initializer_p
		       || (CONSTRUCTOR_NELTS (stripped_init) > 0
			   && (BRACE_ENCLOSED_INITIALIZER_P
			       (CONSTRUCTOR_ELT (stripped_init,0)->value))))
		{
		  if (complain & tf_error)
		    error ("too many braces around scalar initializer "
			   "for type %qT", type);
		  init = error_mark_node;
		}
	    }
	  else
	    maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
	}

      d->cur++;
      return init;
    }

  /* "If T is a class type and the initializer list has a single element of
     type cv U, where U is T or a class derived from T, the object is
     initialized from that element."  Even if T is an aggregate.  */
  if (cxx_dialect >= cxx11 && (CLASS_TYPE_P (type) || VECTOR_TYPE_P (type))
      && first_initializer_p
      && d->end - d->cur == 1
      && reference_related_p (type, TREE_TYPE (init)))
    {
      d->cur++;
      return init;
    }

  /* [dcl.init.aggr]

     All implicit type conversions (clause _conv_) are considered when
     initializing the aggregate member with an initializer from an
     initializer-list.  If the initializer can initialize a member,
     the member is initialized.  Otherwise, if the member is itself a
     non-empty subaggregate, brace elision is assumed and the
     initializer is considered for the initialization of the first
     member of the subaggregate.  */
  if ((TREE_CODE (init) != CONSTRUCTOR || COMPOUND_LITERAL_P (init))
      /* But don't try this for the first initializer, since that would be
	 looking through the outermost braces; A a2 = { a1 }; is not a
	 valid aggregate initialization.  */
      && !first_initializer_p
      && (same_type_ignoring_top_level_qualifiers_p (type, TREE_TYPE (init))
	  || can_convert_arg (type, TREE_TYPE (init), init, LOOKUP_NORMAL,
			      complain)))
    {
      d->cur++;
      return init;
    }

  /* [dcl.init.string]

     A char array (whether plain char, signed char, or unsigned char)
     can be initialized by a string-literal (optionally enclosed in
     braces); a wchar_t array can be initialized by a wide
     string-literal (optionally enclosed in braces).  */
  if (TREE_CODE (type) == ARRAY_TYPE
      && char_type_p (TYPE_MAIN_VARIANT (TREE_TYPE (type))))
    {
      tree str_init = init;
      tree stripped_str_init = stripped_init;

      /* Strip one level of braces if and only if they enclose a single
	 element (as allowed by [dcl.init.string]).  */
      if (!first_initializer_p
	  && TREE_CODE (stripped_str_init) == CONSTRUCTOR
	  && CONSTRUCTOR_NELTS (stripped_str_init) == 1)
	{
	  str_init = (*CONSTRUCTOR_ELTS (stripped_str_init))[0].value;
	  stripped_str_init = tree_strip_any_location_wrapper (str_init);
	}

      /* If it's a string literal, then it's the initializer for the array
	 as a whole. Otherwise, continue with normal initialization for
	 array types (one value per array element).  */
      if (TREE_CODE (stripped_str_init) == STRING_CST)
	{
	  if (has_designator_problem (d, complain))
	    return error_mark_node;
	  d->cur++;
	  return str_init;
	}
    }

  /* The following cases are about aggregates. If we are not within a full
     initializer already, and there is not a CONSTRUCTOR, it means that there
     is a missing set of braces (that is, we are processing the case for
     which reshape_init exists).  */
  if (!first_initializer_p)
    {
      if (TREE_CODE (stripped_init) == CONSTRUCTOR)
	{
	  tree init_type = TREE_TYPE (init);
	  if (init_type && TYPE_PTRMEMFUNC_P (init_type))
	    /* There is no need to call reshape_init for pointer-to-member
	       function initializers, as they are always constructed correctly
	       by the front end.  Here we have e.g. {.__pfn=0B, .__delta=0},
	       which is missing outermost braces.  We should warn below, and
	       one of the routines below will wrap it in additional { }.  */;
	  /* For a nested compound literal, proceed to specialized routines,
	     to handle initialization of arrays and similar.  */
	  else if (COMPOUND_LITERAL_P (stripped_init))
	    gcc_assert (!BRACE_ENCLOSED_INITIALIZER_P (stripped_init));
	  /* A CONSTRUCTOR of the target's type is a previously
	     digested initializer.  */
	  else if (same_type_ignoring_top_level_qualifiers_p (type,
							      init_type))
	    {
	      ++d->cur;
	      return init;
	    }
	  else
	    {
	      /* Something that hasn't been reshaped yet.  */
	      ++d->cur;
	      gcc_assert (BRACE_ENCLOSED_INITIALIZER_P (stripped_init));
	      return reshape_init (type, init, complain);
	    }
	}

      if (complain & tf_warning)
	warning (OPT_Wmissing_braces,
		 "missing braces around initializer for %qT", type);
    }

  /* Dispatch to specialized routines.  */
  if (CLASS_TYPE_P (type))
    return reshape_init_class (type, d, first_initializer_p, complain);
  else if (TREE_CODE (type) == ARRAY_TYPE)
    return reshape_init_array (type, d, first_initializer_p, complain);
  else if (VECTOR_TYPE_P (type))
    return reshape_init_vector (type, d, complain);
  else
    gcc_unreachable();
}

/* Undo the brace-elision allowed by [dcl.init.aggr] in a
   brace-enclosed aggregate initializer.

   INIT is the CONSTRUCTOR containing the list of initializers describing
   a brace-enclosed initializer for an entity of the indicated aggregate
   TYPE.  It may not presently match the shape of the TYPE; for example:

     struct S { int a; int b; };
     struct S a[] = { 1, 2, 3, 4 };

   Here INIT will hold a vector of four elements, rather than a
   vector of two elements, each itself a vector of two elements.  This
   routine transforms INIT from the former form into the latter.  The
   revised CONSTRUCTOR node is returned.
 */

tree
reshape_init (tree type, tree init, tsubst_flags_t complain)
{
  vec<constructor_elt, va_gc> *v;
  reshape_iter d;
  tree new_init;

  gcc_assert (BRACE_ENCLOSED_INITIALIZER_P (init));

  v = CONSTRUCTOR_ELTS (init);

  /* An empty constructor does not need reshaping, and it is always a valid
     initializer.  */
  if (vec_safe_is_empty (v))
    return init;

  /* Brace elision is not performed for a CONSTRUCTOR representing
     parenthesized aggregate initialization.  */
  if (CONSTRUCTOR_IS_PAREN_INIT (init))
    {
      tree elt = (*v)[0].value;
      /* If we're initializing a char array from a string-literal that is
	 enclosed in braces, unwrap it here.  */
      if (TREE_CODE (type) == ARRAY_TYPE
	  && vec_safe_length (v) == 1
	  && char_type_p (TYPE_MAIN_VARIANT (TREE_TYPE (type)))
	  && TREE_CODE (tree_strip_any_location_wrapper (elt)) == STRING_CST)
	return elt;
      return init;
    }

  /* Handle [dcl.init.list] direct-list-initialization from
     single element of enumeration with a fixed underlying type.  */
  if (is_direct_enum_init (type, init))
    {
      tree elt = CONSTRUCTOR_ELT (init, 0)->value;
      type = cv_unqualified (type);
      if (check_narrowing (ENUM_UNDERLYING_TYPE (type), elt, complain))
	{
	  /* Suppress spurious warnings about the cast we build here.  */
	  warning_sentinel w (warn_useless_cast);
	  warning_sentinel w2 (warn_ignored_qualifiers);
	  return cp_build_c_cast (input_location, type, elt,
				  tf_warning_or_error);
	}
      else
	return error_mark_node;
    }

  /* Recurse on this CONSTRUCTOR.  */
  d.cur = &(*v)[0];
  d.end = d.cur + v->length ();

  new_init = reshape_init_r (type, &d, init, complain);
  if (new_init == error_mark_node)
    return error_mark_node;

  /* Make sure all the element of the constructor were used.  Otherwise,
     issue an error about exceeding initializers.  */
  if (d.cur != d.end)
    {
      if (complain & tf_error)
	error ("too many initializers for %qT", type);
      return error_mark_node;
    }

  /* Propagate the direct-init and designated-init flags onto the
     reshaped CONSTRUCTOR.  */
  if (CONSTRUCTOR_IS_DIRECT_INIT (init)
      && BRACE_ENCLOSED_INITIALIZER_P (new_init))
    CONSTRUCTOR_IS_DIRECT_INIT (new_init) = true;
  if (CONSTRUCTOR_IS_DESIGNATED_INIT (init)
      && BRACE_ENCLOSED_INITIALIZER_P (new_init))
    CONSTRUCTOR_IS_DESIGNATED_INIT (new_init) = true;

  return new_init;
}

/* Verify array initializer.  Returns true if errors have been reported.  */

bool
check_array_initializer (tree decl, tree type, tree init)
{
  tree element_type = TREE_TYPE (type);

  /* The array type itself need not be complete, because the
     initializer may tell us how many elements are in the array.
     But, the elements of the array must be complete.  */
  if (!COMPLETE_TYPE_P (complete_type (element_type)))
    {
      /* DECL may be NULL here (e.g. for a compound literal).  */
      if (decl)
	error_at (DECL_SOURCE_LOCATION (decl),
		  "elements of array %q#D have incomplete type", decl);
      else
	error ("elements of array %q#T have incomplete type", type);
      return true;
    }

  location_t loc = (decl ? location_of (decl) : input_location);
  if (!verify_type_context (loc, TCTX_ARRAY_ELEMENT, element_type))
    return true;

  /* A compound literal can't have variable size.  */
  if (init && !decl
      && ((COMPLETE_TYPE_P (type) && !TREE_CONSTANT (TYPE_SIZE (type)))
	  || !TREE_CONSTANT (TYPE_SIZE (element_type))))
    {
      error ("variable-sized compound literal");
      return true;
    }
  return false;
}

/* Subroutine of check_initializer; args are passed down from that function.
   Set stmts_are_full_exprs_p to 1 across a call to build_aggr_init.  */

static tree
build_aggr_init_full_exprs (tree decl, tree init, int flags)
{
  gcc_assert (stmts_are_full_exprs_p ());
  return build_aggr_init (decl, init, flags, tf_warning_or_error);
}

/* Verify INIT (the initializer for DECL), and record the
   initialization in DECL_INITIAL, if appropriate.  CLEANUP is as for
   grok_reference_init.

   If the return value is non-NULL, it is an expression that must be
   evaluated dynamically to initialize DECL.
*/ static tree check_initializer (tree decl, tree init, int flags, vec<tree, va_gc> **cleanups) { tree type; tree init_code = NULL; tree core_type; /* Things that are going to be initialized need to have complete type. */ TREE_TYPE (decl) = type = complete_type (TREE_TYPE (decl)); if (DECL_HAS_VALUE_EXPR_P (decl)) { /* A variable with DECL_HAS_VALUE_EXPR_P set is just a placeholder, it doesn't have storage to be initialized. */ gcc_assert (init == NULL_TREE); return NULL_TREE; } if (type == error_mark_node) /* We will have already complained. */ return NULL_TREE; if (TREE_CODE (type) == ARRAY_TYPE) { if (check_array_initializer (decl, type, init)) return NULL_TREE; } else if (!COMPLETE_TYPE_P (type)) { error_at (DECL_SOURCE_LOCATION (decl), "%q#D has incomplete type", decl); TREE_TYPE (decl) = error_mark_node; return NULL_TREE; } else /* There is no way to make a variable-sized class type in GNU C++. */ gcc_assert (TREE_CONSTANT (TYPE_SIZE (type))); if (init && BRACE_ENCLOSED_INITIALIZER_P (init)) { int init_len = CONSTRUCTOR_NELTS (init); if (SCALAR_TYPE_P (type)) { if (init_len == 0) { maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS); init = build_zero_init (type, NULL_TREE, false); } else if (init_len != 1 && TREE_CODE (type) != COMPLEX_TYPE) { error_at (cp_expr_loc_or_loc (init, DECL_SOURCE_LOCATION (decl)), "scalar object %qD requires one element in " "initializer", decl); TREE_TYPE (decl) = error_mark_node; return NULL_TREE; } } } if (TREE_CODE (decl) == CONST_DECL) { gcc_assert (!TYPE_REF_P (type)); DECL_INITIAL (decl) = init; gcc_assert (init != NULL_TREE); init = NULL_TREE; } else if (!init && DECL_REALLY_EXTERN (decl)) ; else if (init || type_build_ctor_call (type) || TYPE_REF_P (type)) { if (TYPE_REF_P (type)) { init = grok_reference_init (decl, type, init, flags); flags |= LOOKUP_ALREADY_DIGESTED; } else if (!init) check_for_uninitialized_const_var (decl, /*constexpr_context_p=*/false, tf_warning_or_error); /* Do not reshape constructors of vectors (they 
don't need to be reshaped. */ else if (BRACE_ENCLOSED_INITIALIZER_P (init)) { if (is_std_init_list (type)) { init = perform_implicit_conversion (type, init, tf_warning_or_error); flags |= LOOKUP_ALREADY_DIGESTED; } else if (TYPE_NON_AGGREGATE_CLASS (type)) { /* Don't reshape if the class has constructors. */ if (cxx_dialect == cxx98) error_at (cp_expr_loc_or_loc (init, DECL_SOURCE_LOCATION (decl)), "in C++98 %qD must be initialized by " "constructor, not by %<{...}%>", decl); } else if (VECTOR_TYPE_P (type) && TYPE_VECTOR_OPAQUE (type)) { error ("opaque vector types cannot be initialized"); init = error_mark_node; } else { init = reshape_init (type, init, tf_warning_or_error); flags |= LOOKUP_NO_NARROWING; } } /* [dcl.init] "Otherwise, if the destination type is an array, the object is initialized as follows..." So handle things like int a[](1, 2, 3); which is permitted in C++20 by P0960. */ else if (TREE_CODE (init) == TREE_LIST && TREE_TYPE (init) == NULL_TREE && TREE_CODE (type) == ARRAY_TYPE && !DECL_DECOMPOSITION_P (decl) && (cxx_dialect >= cxx20)) init = do_aggregate_paren_init (init, type); else if (TREE_CODE (init) == TREE_LIST && TREE_TYPE (init) != unknown_type_node && !MAYBE_CLASS_TYPE_P (type)) { gcc_assert (TREE_CODE (decl) != RESULT_DECL); /* We get here with code like `int a (2);' */ init = build_x_compound_expr_from_list (init, ELK_INIT, tf_warning_or_error); } /* If DECL has an array type without a specific bound, deduce the array size from the initializer. 
*/ maybe_deduce_size_from_array_init (decl, init); type = TREE_TYPE (decl); if (type == error_mark_node) return NULL_TREE; if (((type_build_ctor_call (type) || CLASS_TYPE_P (type)) && !(flags & LOOKUP_ALREADY_DIGESTED) && !(init && BRACE_ENCLOSED_INITIALIZER_P (init) && CP_AGGREGATE_TYPE_P (type) && (CLASS_TYPE_P (type) || !TYPE_NEEDS_CONSTRUCTING (type) || type_has_extended_temps (type)))) || (DECL_DECOMPOSITION_P (decl) && TREE_CODE (type) == ARRAY_TYPE)) { init_code = build_aggr_init_full_exprs (decl, init, flags); /* A constructor call is a non-trivial initializer even if it isn't explicitly written. */ if (TREE_SIDE_EFFECTS (init_code)) DECL_NONTRIVIALLY_INITIALIZED_P (decl) = true; /* If this is a constexpr initializer, expand_default_init will have returned an INIT_EXPR rather than a CALL_EXPR. In that case, pull the initializer back out and pass it down into store_init_value. */ while (TREE_CODE (init_code) == EXPR_STMT || TREE_CODE (init_code) == CONVERT_EXPR) init_code = TREE_OPERAND (init_code, 0); if (TREE_CODE (init_code) == INIT_EXPR) { /* In C++20, the call to build_aggr_init could have created an INIT_EXPR with a CONSTRUCTOR as the RHS to handle A(1, 2). */ init = TREE_OPERAND (init_code, 1); init_code = NULL_TREE; /* Don't call digest_init; it's unnecessary and will complain about aggregate initialization of non-aggregate classes. */ flags |= LOOKUP_ALREADY_DIGESTED; } else if (DECL_DECLARED_CONSTEXPR_P (decl) || DECL_DECLARED_CONSTINIT_P (decl)) { /* Declared constexpr or constinit, but no suitable initializer; massage init appropriately so we can pass it into store_init_value for the error. 
*/ if (CLASS_TYPE_P (type) && (!init || TREE_CODE (init) == TREE_LIST)) { init = build_functional_cast (input_location, type, init, tf_none); if (TREE_CODE (init) == TARGET_EXPR) TARGET_EXPR_DIRECT_INIT_P (init) = true; } init_code = NULL_TREE; } else init = NULL_TREE; } if (init && TREE_CODE (init) != TREE_VEC) { /* In aggregate initialization of a variable, each element initialization is a full-expression because there is no enclosing expression. */ gcc_assert (stmts_are_full_exprs_p ()); init_code = store_init_value (decl, init, cleanups, flags); if (DECL_INITIAL (decl) && TREE_CODE (DECL_INITIAL (decl)) == CONSTRUCTOR && !vec_safe_is_empty (CONSTRUCTOR_ELTS (DECL_INITIAL (decl)))) { tree elt = CONSTRUCTOR_ELTS (DECL_INITIAL (decl))->last ().value; if (TREE_CODE (TREE_TYPE (elt)) == ARRAY_TYPE && TYPE_SIZE (TREE_TYPE (elt)) == NULL_TREE) cp_complete_array_type (&TREE_TYPE (elt), elt, false); } if (pedantic && TREE_CODE (type) == ARRAY_TYPE && DECL_INITIAL (decl) && TREE_CODE (DECL_INITIAL (decl)) == STRING_CST && PAREN_STRING_LITERAL_P (DECL_INITIAL (decl))) warning_at (cp_expr_loc_or_loc (DECL_INITIAL (decl), DECL_SOURCE_LOCATION (decl)), 0, "array %qD initialized by parenthesized " "string literal %qE", decl, DECL_INITIAL (decl)); init = NULL_TREE; } } else { if (CLASS_TYPE_P (core_type = strip_array_types (type)) && (CLASSTYPE_READONLY_FIELDS_NEED_INIT (core_type) || CLASSTYPE_REF_FIELDS_NEED_INIT (core_type))) diagnose_uninitialized_cst_or_ref_member (core_type, /*using_new=*/false, /*complain=*/true); check_for_uninitialized_const_var (decl, /*constexpr_context_p=*/false, tf_warning_or_error); } if (init && init != error_mark_node) init_code = build2 (INIT_EXPR, type, decl, init); if (init_code) { /* We might have set these in cp_finish_decl. 
*/
      DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl) = false;
      TREE_CONSTANT (decl) = false;
    }

  /* An in-class initializer for a static data member must be a constant
     expression; diagnose (once per translation unit) anything else.  */
  if (init_code
      && DECL_IN_AGGR_P (decl) && DECL_INITIALIZED_IN_CLASS_P (decl))
    {
      static int explained = 0;

      if (cxx_dialect < cxx11)
	error ("initializer invalid for static member with constructor");
      else if (cxx_dialect < cxx17)
	error ("non-constant in-class initialization invalid for static "
	       "member %qD", decl);
      else
	error ("non-constant in-class initialization invalid for non-inline "
	       "static member %qD", decl);
      if (!explained)
	{
	  inform (input_location,
		  "(an out of class initialization is required)");
	  explained = 1;
	}
      return NULL_TREE;
    }

  return init_code;
}

/* If DECL is not a local variable, give it RTL.  INIT is the (possibly
   NULL) initializer, used only to decide whether an in-class static data
   member declaration must be external.  ASMSPEC is the user-supplied
   assembler name, or NULL.  */

static void
make_rtl_for_nonlocal_decl (tree decl, tree init, const char* asmspec)
{
  int toplev = toplevel_bindings_p ();
  int defer_p;

  /* Set the DECL_ASSEMBLER_NAME for the object.  */
  if (asmspec)
    {
      /* The `register' keyword, when used together with an
	 asm-specification, indicates that the variable should be
	 placed in a particular register.  */
      if (VAR_P (decl) && DECL_REGISTER (decl))
	{
	  set_user_assembler_name (decl, asmspec);
	  DECL_HARD_REGISTER (decl) = 1;
	}
      else
	{
	  if (TREE_CODE (decl) == FUNCTION_DECL
	      && fndecl_built_in_p (decl, BUILT_IN_NORMAL))
	    set_builtin_user_assembler_name (decl, asmspec);
	  set_user_assembler_name (decl, asmspec);
	}
    }

  /* Handle non-variables up front.  */
  if (!VAR_P (decl))
    {
      rest_of_decl_compilation (decl, toplev, at_eof);
      return;
    }

  /* If we see a class member here, it should be a static data
     member.  */
  if (DECL_LANG_SPECIFIC (decl) && DECL_IN_AGGR_P (decl))
    {
      gcc_assert (TREE_STATIC (decl));
      /* An in-class declaration of a static data member should be
	 external; it is only a declaration, and not a definition.  */
      if (init == NULL_TREE)
	gcc_assert (DECL_EXTERNAL (decl) || !TREE_PUBLIC (decl));
    }

  /* We don't create any RTL for local variables.  */
  if (DECL_FUNCTION_SCOPE_P (decl) && !TREE_STATIC (decl))
    return;

  /* We defer emission of local statics until the corresponding
     DECL_EXPR is expanded.  But with constexpr its function might never
     be expanded, so go ahead and tell cgraph about the variable now.  */
  defer_p = ((DECL_FUNCTION_SCOPE_P (decl)
	      && !var_in_maybe_constexpr_fn (decl))
	     || DECL_VIRTUAL_P (decl));

  /* Defer template instantiations.  */
  if (DECL_LANG_SPECIFIC (decl) && DECL_IMPLICIT_INSTANTIATION (decl))
    defer_p = 1;

  /* If we're not deferring, go ahead and assemble the variable.  */
  if (!defer_p)
    rest_of_decl_compilation (decl, toplev, at_eof);
}

/* walk_tree helper for wrap_temporary_cleanups, below.  DATA is the
   guard cleanup (a tree) of the variable being initialized.  */

static tree
wrap_cleanups_r (tree *stmt_p, int *walk_subtrees, void *data)
{
  /* Stop at types or full-expression boundaries.  */
  if (TYPE_P (*stmt_p)
      || TREE_CODE (*stmt_p) == CLEANUP_POINT_EXPR)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  if (TREE_CODE (*stmt_p) == TARGET_EXPR)
    {
      tree guard = (tree)data;
      tree tcleanup = TARGET_EXPR_CLEANUP (*stmt_p);

      /* Wrap the temporary's own cleanup so that GUARD also runs if
	 that cleanup exits via an exception.  */
      tcleanup = build2 (TRY_CATCH_EXPR, void_type_node, tcleanup, guard);
      /* Tell honor_protect_cleanup_actions to handle this as a separate
	 cleanup.  */
      TRY_CATCH_IS_CLEANUP (tcleanup) = 1;
      TARGET_EXPR_CLEANUP (*stmt_p) = tcleanup;
    }

  return NULL_TREE;
}

/* We're initializing a local variable which has a cleanup GUARD.  If there
   are any temporaries used in the initializer INIT of this variable, we
   need to wrap their cleanups with TRY_CATCH_EXPR (, GUARD) so that the
   variable will be cleaned up properly if one of them throws.

   Unfortunately, there's no way to express this properly in terms of
   nesting, as the regions for the temporaries overlap the region for the
   variable itself; if there are two temporaries, the variable needs to be
   the first thing destroyed if either of them throws.  However, we only
   want to run the variable's cleanup if it actually got constructed.
So we need to guard the
   temporary cleanups with the variable's cleanup if they are run on the
   normal path, but not if they are run on the exceptional path.  We
   implement this by telling honor_protect_cleanup_actions to strip the
   variable cleanup from the exceptional path.  */

static void
wrap_temporary_cleanups (tree init, tree guard)
{
  cp_walk_tree_without_duplicates (&init, wrap_cleanups_r, (void *)guard);
}

/* Generate code to initialize DECL (a local variable) from INIT, and
   register its cleanup (destructor call), if any, with the current
   statement tree.  */

static void
initialize_local_var (tree decl, tree init)
{
  tree type = TREE_TYPE (decl);
  tree cleanup;
  int already_used;

  gcc_assert (VAR_P (decl)
	      || TREE_CODE (decl) == RESULT_DECL);
  gcc_assert (!TREE_STATIC (decl));

  if (DECL_SIZE (decl) == NULL_TREE)
    {
      /* If we used it already as memory, it must stay in memory.  */
      DECL_INITIAL (decl) = NULL_TREE;
      TREE_ADDRESSABLE (decl) = TREE_USED (decl);
      return;
    }

  if (type == error_mark_node)
    return;

  /* Compute and store the initial value.  */
  already_used = TREE_USED (decl) || TREE_USED (type);
  if (TREE_USED (type))
    DECL_READ_P (decl) = 1;

  /* Generate a cleanup, if necessary.  Done before emitting the
     initialization so temporaries in INIT can be guarded with it.  */
  cleanup = cxx_maybe_build_cleanup (decl, tf_warning_or_error);

  /* Perform the initialization.  */
  if (init)
    {
      tree rinit = (TREE_CODE (init) == INIT_EXPR
		    ? TREE_OPERAND (init, 1) : NULL_TREE);
      if (rinit && !TREE_SIDE_EFFECTS (rinit))
	{
	  /* Stick simple initializers in DECL_INITIAL so that
	     -Wno-init-self works (c++/34772).  */
	  gcc_assert (TREE_OPERAND (init, 0) == decl);
	  DECL_INITIAL (decl) = rinit;

	  if (warn_init_self && TYPE_REF_P (type))
	    {
	      STRIP_NOPS (rinit);
	      if (rinit == decl)
		warning_at (DECL_SOURCE_LOCATION (decl),
			    OPT_Winit_self,
			    "reference %qD is initialized with itself", decl);
	    }
	}
      else
	{
	  int saved_stmts_are_full_exprs_p;

	  /* If we're only initializing a single object, guard the
	     destructors of any temporaries used in its initializer with
	     its destructor.  This isn't right for arrays because each
	     element initialization is a full-expression.  */
	  if (cleanup && TREE_CODE (type) != ARRAY_TYPE)
	    wrap_temporary_cleanups (init, cleanup);

	  gcc_assert (building_stmt_list_p ());
	  saved_stmts_are_full_exprs_p = stmts_are_full_exprs_p ();
	  current_stmt_tree ()->stmts_are_full_exprs_p = 1;
	  finish_expr_stmt (init);
	  current_stmt_tree ()->stmts_are_full_exprs_p =
	    saved_stmts_are_full_exprs_p;
	}
    }

  /* Set this to 0 so we can tell whether an aggregate which was
     initialized was ever used.  Don't do this if it has a
     destructor, so we don't complain about the 'resource allocation
     is initialization' idiom.  Now set attribute((unused)) on types
     so decls of that type will be marked used.  (see TREE_USED, above.)  */
  if (TYPE_NEEDS_CONSTRUCTING (type)
      && ! already_used
      && TYPE_HAS_TRIVIAL_DESTRUCTOR (type)
      && DECL_NAME (decl))
    TREE_USED (decl) = 0;
  else if (already_used)
    TREE_USED (decl) = 1;

  if (cleanup)
    finish_decl_cleanup (decl, cleanup);
}

/* DECL is a VAR_DECL for a compiler-generated variable with static
   storage duration (like a virtual table) whose initializer is a
   compile-time constant.  V is the vector of constructor elements
   forming the initializer.  Initialize the variable and provide it to
   the back end.  */

void
initialize_artificial_var (tree decl, vec<constructor_elt, va_gc> *v)
{
  tree init;
  gcc_assert (DECL_ARTIFICIAL (decl));
  init = build_constructor (TREE_TYPE (decl), v);
  gcc_assert (TREE_CODE (init) == CONSTRUCTOR);
  DECL_INITIAL (decl) = init;
  DECL_INITIALIZED_P (decl) = 1;
  /* Mark the decl as constexpr so that we can access its content
     at compile time.  */
  DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl) = true;
  DECL_DECLARED_CONSTEXPR_P (decl) = true;
  determine_visibility (decl);
  layout_var_decl (decl);
  maybe_commonize_var (decl);
  make_rtl_for_nonlocal_decl (decl, init, /*asmspec=*/NULL);
}

/* INIT is the initializer for a variable, as represented by the
   parser.  Returns true iff INIT is value-dependent.  */

static bool
value_dependent_init_p (tree init)
{
  if (TREE_CODE (init) == TREE_LIST)
    /* A parenthesized initializer, e.g.: int i (3, 2); ?
*/
    return any_value_dependent_elements_p (init);
  else if (TREE_CODE (init) == CONSTRUCTOR)
  /* A brace-enclosed initializer, e.g.: int i = { 3 }; ? */
    {
      if (dependent_type_p (TREE_TYPE (init)))
	return true;

      vec<constructor_elt, va_gc> *elts;
      size_t nelts;
      size_t i;

      /* Recurse into each element of the braced initializer.  */
      elts = CONSTRUCTOR_ELTS (init);
      nelts = vec_safe_length (elts);
      for (i = 0; i < nelts; ++i)
	if (value_dependent_init_p ((*elts)[i].value))
	  return true;
    }
  else
    /* It must be a simple expression, e.g., int i = 3;  */
    return value_dependent_expression_p (init);

  return false;
}

// Returns true if a DECL is VAR_DECL with the concept specifier.
static inline bool
is_concept_var (tree decl)
{
  return (VAR_P (decl)
	  // Not all variables have DECL_LANG_SPECIFIC.
	  && DECL_LANG_SPECIFIC (decl)
	  && DECL_DECLARED_CONCEPT_P (decl));
}

/* A helper function to be called via walk_tree.  If any label exists
   under *TP, it is (going to be) forced.  Set has_forced_label_in_static.  */

static tree
notice_forced_label_r (tree *tp, int *walk_subtrees, void *)
{
  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  if (TREE_CODE (*tp) == LABEL_DECL)
    cfun->has_forced_label_in_static = 1;
  return NULL_TREE;
}

/* Return true if DECL has either a trivial destructor, or for C++20 is
   constexpr and has a constexpr destructor.  TYPE is DECL's type.  */

static bool
decl_maybe_constant_destruction (tree decl, tree type)
{
  return (TYPE_HAS_TRIVIAL_DESTRUCTOR (type)
	  || (cxx_dialect >= cxx20
	      && VAR_P (decl)
	      && DECL_DECLARED_CONSTEXPR_P (decl)
	      && type_has_constexpr_destructor (strip_array_types (type))));
}

static tree declare_simd_adjust_this (tree *, int *, void *);

/* Helper function of omp_declare_variant_finalize.  Finalize one
   "omp declare variant base" attribute.  Return true if it should be
   removed.
*/

static bool
omp_declare_variant_finalize_one (tree decl, tree attr)
{
  /* For member functions, rewrite references to `this' in both the
     context selector and the variant-id expression stored in ATTR.  */
  if (TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE)
    {
      walk_tree (&TREE_VALUE (TREE_VALUE (attr)), declare_simd_adjust_this,
		 DECL_ARGUMENTS (decl), NULL);
      walk_tree (&TREE_PURPOSE (TREE_VALUE (attr)), declare_simd_adjust_this,
		 DECL_ARGUMENTS (decl), NULL);
    }

  tree ctx = TREE_VALUE (TREE_VALUE (attr));
  tree simd = omp_get_context_selector (ctx, "construct", "simd");
  if (simd)
    {
      TREE_VALUE (simd)
	= c_omp_declare_simd_clauses_to_numbers (DECL_ARGUMENTS (decl),
						 TREE_VALUE (simd));
      /* FIXME, adjusting simd args unimplemented.  */
      return true;
    }

  /* Unpack the locations and id-kind recorded when the attribute was
     parsed.  */
  tree chain = TREE_CHAIN (TREE_VALUE (attr));
  location_t varid_loc = cp_expr_loc_or_input_loc (TREE_PURPOSE (TREE_CHAIN (chain)));
  location_t match_loc = cp_expr_loc_or_input_loc (TREE_PURPOSE (chain));
  cp_id_kind idk = (cp_id_kind) tree_to_uhwi (TREE_VALUE (chain));
  tree variant = TREE_PURPOSE (TREE_VALUE (attr));

  location_t save_loc = input_location;
  input_location = varid_loc;

  /* Build a dummy argument list matching DECL's parameters so overload
     resolution can pick the variant.  */
  releasing_vec args;
  tree parm = DECL_ARGUMENTS (decl);
  if (TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE)
    parm = DECL_CHAIN (parm);
  for (; parm; parm = DECL_CHAIN (parm))
    if (type_dependent_expression_p (parm))
      vec_safe_push (args, build_constructor (TREE_TYPE (parm), NULL));
    else if (MAYBE_CLASS_TYPE_P (TREE_TYPE (parm)))
      vec_safe_push (args, build_local_temp (TREE_TYPE (parm)));
    else
      vec_safe_push (args, build_zero_cst (TREE_TYPE (parm)));

  bool koenig_p = false;
  if (idk == CP_ID_KIND_UNQUALIFIED || idk == CP_ID_KIND_TEMPLATE_ID)
    {
      if (identifier_p (variant)
	  /* In C++20, we may need to perform ADL for a template
	     name.  */
	  || (TREE_CODE (variant) == TEMPLATE_ID_EXPR
	      && identifier_p (TREE_OPERAND (variant, 0))))
	{
	  if (!args->is_empty ())
	    {
	      koenig_p = true;
	      if (!any_type_dependent_arguments_p (args))
		variant = perform_koenig_lookup (variant, args,
						 tf_warning_or_error);
	    }
	  else
	    variant = unqualified_fn_lookup_error (variant);
	}
      else if (!args->is_empty () && is_overloaded_fn (variant))
	{
	  tree fn = get_first_fn (variant);
	  fn = STRIP_TEMPLATE (fn);
	  if (!((TREE_CODE (fn) == USING_DECL && DECL_DEPENDENT_P (fn))
		|| DECL_FUNCTION_MEMBER_P (fn)
		|| DECL_LOCAL_DECL_P (fn)))
	    {
	      koenig_p = true;
	      if (!any_type_dependent_arguments_p (args))
		variant = perform_koenig_lookup (variant, args,
						 tf_warning_or_error);
	    }
	}
    }

  if (idk == CP_ID_KIND_QUALIFIED)
    variant = finish_call_expr (variant, &args, /*disallow_virtual=*/true,
				koenig_p, tf_warning_or_error);
  else
    variant = finish_call_expr (variant, &args, /*disallow_virtual=*/false,
				koenig_p, tf_warning_or_error);
  if (variant == error_mark_node && !processing_template_decl)
    return true;

  variant = cp_get_callee_fndecl_nofold (variant);
  input_location = save_loc;

  if (variant)
    {
      const char *varname = IDENTIFIER_POINTER (DECL_NAME (variant));
      if (!comptypes (TREE_TYPE (decl), TREE_TYPE (variant), 0))
	{
	  error_at (varid_loc, "variant %qD and base %qD have incompatible "
			       "types", variant, decl);
	  return true;
	}
      if (fndecl_built_in_p (variant)
	  && (strncmp (varname, "__builtin_", strlen ("__builtin_")) == 0
	      || strncmp (varname, "__sync_", strlen ("__sync_")) == 0
	      || strncmp (varname, "__atomic_", strlen ("__atomic_")) == 0))
	{
	  error_at (varid_loc, "variant %qD is a built-in", variant);
	  return true;
	}
      else
	{
	  tree construct = omp_get_context_selector (ctx, "construct", NULL);
	  c_omp_mark_declare_variant (match_loc, variant, construct);
	  if (!omp_context_selector_matches (ctx))
	    return true;
	  TREE_PURPOSE (TREE_VALUE (attr)) = variant;
	}
    }
  else if (!processing_template_decl)
    {
      error_at (varid_loc, "could not find variant declaration");
      return true;
    }

  return false;
}

/*
Helper function, finish up "omp declare variant base"
   attribute now that there is a DECL.  ATTR is the first
   "omp declare variant base" attribute.  */

void
omp_declare_variant_finalize (tree decl, tree attr)
{
  size_t attr_len = strlen ("omp declare variant base");
  tree *list = &DECL_ATTRIBUTES (decl);
  bool remove_all = false;
  location_t match_loc = DECL_SOURCE_LOCATION (decl);
  if (TREE_CHAIN (TREE_VALUE (attr))
      && TREE_PURPOSE (TREE_CHAIN (TREE_VALUE (attr)))
      && EXPR_HAS_LOCATION (TREE_PURPOSE (TREE_CHAIN (TREE_VALUE (attr)))))
    match_loc = EXPR_LOCATION (TREE_PURPOSE (TREE_CHAIN (TREE_VALUE (attr))));

  /* `declare variant' is not permitted on these kinds of functions;
     diagnose and drop all such attributes.  */
  if (DECL_CONSTRUCTOR_P (decl))
    {
      error_at (match_loc, "%<declare variant%> on constructor %qD", decl);
      remove_all = true;
    }
  else if (DECL_DESTRUCTOR_P (decl))
    {
      error_at (match_loc, "%<declare variant%> on destructor %qD", decl);
      remove_all = true;
    }
  else if (DECL_DEFAULTED_FN (decl))
    {
      error_at (match_loc, "%<declare variant%> on defaulted %qD", decl);
      remove_all = true;
    }
  else if (DECL_DELETED_FN (decl))
    {
      error_at (match_loc, "%<declare variant%> on deleted %qD", decl);
      remove_all = true;
    }
  else if (DECL_VIRTUAL_P (decl))
    {
      error_at (match_loc, "%<declare variant%> on virtual %qD", decl);
      remove_all = true;
    }
  /* This loop is like private_lookup_attribute, except that it works
     with tree * rather than tree, as we might want to remove the
     attributes that are diagnosed as erroneous.  */
  while (*list)
    {
      tree attr = get_attribute_name (*list);
      size_t ident_len = IDENTIFIER_LENGTH (attr);
      if (cmp_attribs ("omp declare variant base", attr_len,
		       IDENTIFIER_POINTER (attr), ident_len))
	{
	  if (remove_all || omp_declare_variant_finalize_one (decl, *list))
	    {
	      *list = TREE_CHAIN (*list);
	      continue;
	    }
	}
      list = &TREE_CHAIN (*list);
    }
}

/* Finish processing of a declaration;
   install its line number and initial value.
   If the length of an array type is not known before,
   it must be determined now, from the initial value, or it is an error.

   INIT is the initializer (if any) for DECL.
If INIT_CONST_EXPR_P is true, then INIT is an integral constant
   expression.

   FLAGS is LOOKUP_ONLYCONVERTING if the = init syntax was used, else 0
   if the (init) syntax was used.  */

void
cp_finish_decl (tree decl, tree init, bool init_const_expr_p,
		tree asmspec_tree, int flags)
{
  tree type;
  vec<tree, va_gc> *cleanups = NULL;
  const char *asmspec = NULL;
  int was_readonly = 0;
  bool var_definition_p = false;
  tree auto_node;

  if (decl == error_mark_node)
    return;
  else if (! decl)
    {
      if (init)
	error ("assignment (not initialization) in declaration");
      return;
    }

  gcc_assert (TREE_CODE (decl) != RESULT_DECL);
  /* Parameters are handled by store_parm_decls, not cp_finish_decl.  */
  gcc_assert (TREE_CODE (decl) != PARM_DECL);

  type = TREE_TYPE (decl);
  if (type == error_mark_node)
    return;

  /* Warn about register storage specifiers except when in GNU global
     or local register variable extension.  */
  if (VAR_P (decl) && DECL_REGISTER (decl) && asmspec_tree == NULL_TREE)
    {
      if (cxx_dialect >= cxx17)
	pedwarn (DECL_SOURCE_LOCATION (decl), OPT_Wregister,
		 "ISO C++17 does not allow %<register%> storage "
		 "class specifier");
      else
	warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wregister,
		    "%<register%> storage class specifier used");
    }

  /* If a name was specified, get the string.  */
  if (at_namespace_scope_p ())
    asmspec_tree = maybe_apply_renaming_pragma (decl, asmspec_tree);
  if (asmspec_tree && asmspec_tree != error_mark_node)
    asmspec = TREE_STRING_POINTER (asmspec_tree);

  if (current_class_type
      && CP_DECL_CONTEXT (decl) == current_class_type
      && TYPE_BEING_DEFINED (current_class_type)
      && !CLASSTYPE_TEMPLATE_INSTANTIATION (current_class_type)
      && (DECL_INITIAL (decl) || init))
    DECL_INITIALIZED_IN_CLASS_P (decl) = 1;

  /* Deduce a placeholder (auto / class template placeholder) type from
     the initializer, if needed.  */
  if (TREE_CODE (decl) != FUNCTION_DECL
      && (auto_node = type_uses_auto (type)))
    {
      tree d_init;
      if (init == NULL_TREE)
	{
	  if (DECL_LANG_SPECIFIC (decl)
	      && DECL_TEMPLATE_INSTANTIATION (decl)
	      && !DECL_TEMPLATE_INSTANTIATED (decl))
	    {
	      /* init is null because we're deferring instantiating the
		 initializer until we need it.  Well, we need it now.  */
	      instantiate_decl (decl, /*defer_ok*/true, /*expl*/false);
	      return;
	    }

	  gcc_assert (CLASS_PLACEHOLDER_TEMPLATE (auto_node));
	}
      d_init = init;
      if (d_init)
	{
	  if (TREE_CODE (d_init) == TREE_LIST
	      && !CLASS_PLACEHOLDER_TEMPLATE (auto_node))
	    d_init = build_x_compound_expr_from_list (d_init, ELK_INIT,
						      tf_warning_or_error);
	  d_init = resolve_nondeduced_context (d_init, tf_warning_or_error);
	}
      enum auto_deduction_context adc = adc_variable_type;
      if (VAR_P (decl) && DECL_DECOMPOSITION_P (decl))
	adc = adc_decomp_type;
      type = TREE_TYPE (decl) = do_auto_deduction (type, d_init, auto_node,
						   tf_warning_or_error, adc,
						   NULL_TREE, flags);
      if (type == error_mark_node)
	return;
      if (TREE_CODE (type) == FUNCTION_TYPE)
	{
	  error ("initializer for %<decltype(auto) %D%> has function type; "
		 "did you forget the %<()%>?", decl);
	  TREE_TYPE (decl) = error_mark_node;
	  return;
	}
      cp_apply_type_quals_to_decl (cp_type_quals (type), decl);
    }

  if (ensure_literal_type_for_constexpr_object (decl) == error_mark_node)
    {
      DECL_DECLARED_CONSTEXPR_P (decl) = 0;
      if (VAR_P (decl) && DECL_CLASS_SCOPE_P (decl))
	{
	  init = NULL_TREE;
	  DECL_EXTERNAL (decl) = 1;
	}
    }

  if (VAR_P (decl)
      && DECL_CLASS_SCOPE_P (decl)
      && verify_type_context (DECL_SOURCE_LOCATION (decl),
			      TCTX_STATIC_STORAGE, type)
      && DECL_INITIALIZED_IN_CLASS_P (decl))
    check_static_variable_definition (decl, type);

  /* Handle `= delete' and `= default' on functions.  */
  if (init && TREE_CODE (decl) == FUNCTION_DECL)
    {
      tree clone;
      if (init == ridpointers[(int)RID_DELETE])
	{
	  /* FIXME check this is 1st decl.  */
	  DECL_DELETED_FN (decl) = 1;
	  DECL_DECLARED_INLINE_P (decl) = 1;
	  DECL_INITIAL (decl) = error_mark_node;
	  FOR_EACH_CLONE (clone, decl)
	    {
	      DECL_DELETED_FN (clone) = 1;
	      DECL_DECLARED_INLINE_P (clone) = 1;
	      DECL_INITIAL (clone) = error_mark_node;
	    }
	  init = NULL_TREE;
	}
      else if (init == ridpointers[(int)RID_DEFAULT])
	{
	  if (defaultable_fn_check (decl))
	    DECL_DEFAULTED_FN (decl) = 1;
	  else
	    DECL_INITIAL (decl) = NULL_TREE;
	}
    }

  if (init && VAR_P (decl))
    {
      DECL_NONTRIVIALLY_INITIALIZED_P (decl) = 1;
      /* If DECL is a reference, then we want to know whether init is a
	 reference constant; init_const_expr_p as passed tells us whether
	 it's an rvalue constant.  */
      if (TYPE_REF_P (type))
	init_const_expr_p = potential_constant_expression (init);
      if (init_const_expr_p)
	{
	  /* Set these flags now for templates.  We'll update the flags in
	     store_init_value for instantiations.  */
	  DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl) = 1;
	  if (decl_maybe_constant_var_p (decl)
	      /* FIXME setting TREE_CONSTANT on refs breaks the back end.  */
	      && !TYPE_REF_P (type))
	    TREE_CONSTANT (decl) = 1;
	}
    }

  if (flag_openmp
      && TREE_CODE (decl) == FUNCTION_DECL
      /* #pragma omp declare variant on methods handled in finish_struct
	 instead.  */
      && (!DECL_NONSTATIC_MEMBER_FUNCTION_P (decl)
	  || COMPLETE_TYPE_P (DECL_CONTEXT (decl))))
    if (tree attr = lookup_attribute ("omp declare variant base",
				      DECL_ATTRIBUTES (decl)))
      omp_declare_variant_finalize (decl, attr);

  if (processing_template_decl)
    {
      bool type_dependent_p;

      /* Add this declaration to the statement-tree.  */
      if (at_function_scope_p ())
	add_decl_expr (decl);

      type_dependent_p = dependent_type_p (type);

      if (check_for_bare_parameter_packs (init))
	{
	  init = NULL_TREE;
	  DECL_INITIAL (decl) = NULL_TREE;
	}

      /* Generally, initializers in templates are expanded when the
	 template is instantiated.  But, if DECL is a variable constant
	 then it can be used in future constant expressions, so its value
	 must be available.  */

      bool dep_init = false;

      if (!VAR_P (decl) || type_dependent_p)
	/* We can't do anything if the decl has dependent type.  */;
      else if (!init && is_concept_var (decl))
	{
	  error ("variable concept has no initializer");
	  init = boolean_true_node;
	}
      else if (init
	       && (init_const_expr_p || DECL_DECLARED_CONSTEXPR_P (decl))
	       && !TYPE_REF_P (type)
	       && decl_maybe_constant_var_p (decl)
	       && !(dep_init = value_dependent_init_p (init)))
	{
	  /* This variable seems to be a non-dependent constant, so process
	     its initializer.  If check_initializer returns non-null the
	     initialization wasn't constant after all.  */
	  tree init_code;
	  cleanups = make_tree_vector ();
	  init_code = check_initializer (decl, init, flags, &cleanups);
	  if (init_code == NULL_TREE)
	    init = NULL_TREE;
	  release_tree_vector (cleanups);
	}
      else
	{
	  gcc_assert (!DECL_PRETTY_FUNCTION_P (decl));
	  /* Deduce array size even if the initializer is dependent.  */
	  maybe_deduce_size_from_array_init (decl, init);
	  /* And complain about multiple initializers.  */
	  if (init && TREE_CODE (init) == TREE_LIST && TREE_CHAIN (init)
	      && !MAYBE_CLASS_TYPE_P (type))
	    init = build_x_compound_expr_from_list (init, ELK_INIT,
						    tf_warning_or_error);
	}

      if (init)
	DECL_INITIAL (decl) = init;

      if (dep_init)
	{
	  retrofit_lang_decl (decl);
	  SET_DECL_DEPENDENT_INIT_P (decl, true);
	}
      return;
    }

  /* Just store non-static data member initializers for later.  */
  if (init && TREE_CODE (decl) == FIELD_DECL)
    DECL_INITIAL (decl) = init;

  /* Take care of TYPE_DECLs up front.  */
  if (TREE_CODE (decl) == TYPE_DECL)
    {
      if (type != error_mark_node
	  && MAYBE_CLASS_TYPE_P (type) && DECL_NAME (decl))
	{
	  if (TREE_TYPE (DECL_NAME (decl)) && TREE_TYPE (decl) != type)
	    warning (0, "shadowing previous type declaration of %q#D",
		     decl);
	  set_identifier_type_value (DECL_NAME (decl), decl);
	}

      /* If we have installed this as the canonical typedef for this
	 type, and that type has not been defined yet, delay emitting
	 the debug information for it, as we will emit it later.  */
      if (TYPE_MAIN_DECL (TREE_TYPE (decl)) == decl
	  && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
	TYPE_DECL_SUPPRESS_DEBUG (decl) = 1;

      rest_of_decl_compilation (decl, DECL_FILE_SCOPE_P (decl),
				at_eof);
      return;
    }

  /* A reference will be modified here, as it is initialized.  */
  if (! DECL_EXTERNAL (decl)
      && TREE_READONLY (decl)
      && TYPE_REF_P (type))
    {
      was_readonly = 1;
      TREE_READONLY (decl) = 0;
    }

  /* This needs to happen before extend_ref_init_temps.  */
  if (VAR_OR_FUNCTION_DECL_P (decl))
    {
      if (VAR_P (decl))
	maybe_commonize_var (decl);
      determine_visibility (decl);
    }

  if (VAR_P (decl))
    {
      duration_kind dk = decl_storage_duration (decl);
      /* [dcl.constinit]/1 "The constinit specifier shall be applied
	 only to a declaration of a variable with static or thread storage
	 duration."  */
      if (DECL_DECLARED_CONSTINIT_P (decl)
	  && !(dk == dk_thread || dk == dk_static))
	{
	  error_at (DECL_SOURCE_LOCATION (decl),
		    "%<constinit%> can only be applied to a variable with "
		    "static or thread storage duration");
	  return;
	}

      /* If this is a local variable that will need a mangled name,
	 register it now.  We must do this before processing the
	 initializer for the variable, since the initialization might
	 require a guard variable, and since the mangled name of the
	 guard variable will depend on the mangled name of this
	 variable.  */
      if (DECL_FUNCTION_SCOPE_P (decl)
	  && TREE_STATIC (decl)
	  && !DECL_ARTIFICIAL (decl))
	{
	  /* The variable holding an anonymous union will have had its
	     discriminator set in finish_anon_union, after which its
	     NAME will have been cleared.  */
	  if (DECL_NAME (decl))
	    determine_local_discriminator (decl);
	  /* Normally has_forced_label_in_static is set during GIMPLE
	     lowering, but [cd]tors are never actually compiled directly.
	     We need to set this early so we can deal with the label
	     address extension.  */
	  if ((DECL_CONSTRUCTOR_P (current_function_decl)
	       || DECL_DESTRUCTOR_P (current_function_decl))
	      && init)
	    {
	      walk_tree (&init, notice_forced_label_r, NULL, NULL);
	      add_local_decl (cfun, decl);
	    }
	  /* And make sure it's in the symbol table for
	     c_parse_final_cleanups to find.  */
	  varpool_node::get_create (decl);
	}

      /* Convert the initializer to the type of DECL, if we have not
	 already initialized DECL.  */
      if (!DECL_INITIALIZED_P (decl)
	  /* If !DECL_EXTERNAL then DECL is being defined.  In the
	     case of a static data member initialized inside the
	     class-specifier, there can be an initializer even if DECL
	     is *not* defined.  */
	  && (!DECL_EXTERNAL (decl) || init))
	{
	  cleanups = make_tree_vector ();
	  init = check_initializer (decl, init, flags, &cleanups);

	  /* Handle:

	     [dcl.init]

	     The memory occupied by any object of static storage
	     duration is zero-initialized at program startup before
	     any other initialization takes place.

	     We cannot create an appropriate initializer until after
	     the type of DECL is finalized.  If DECL_INITIAL is set,
	     then the DECL is statically initialized, and any
	     necessary zero-initialization has already been performed.  */
	  if (TREE_STATIC (decl) && !DECL_INITIAL (decl))
	    DECL_INITIAL (decl) = build_zero_init (TREE_TYPE (decl),
						   /*nelts=*/NULL_TREE,
						   /*static_storage_p=*/true);
	  /* Remember that the initialization for this variable has
	     taken place.  */
	  DECL_INITIALIZED_P (decl) = 1;
	  /* This declaration is the definition of this variable,
	     unless we are initializing a static data member within
	     the class specifier.  */
	  if (!DECL_EXTERNAL (decl))
	    var_definition_p = true;
	}
      /* If the variable has an array type, lay out the type, even if
	 there is no initializer.  It is valid to index through the
	 array, and we must get TYPE_ALIGN set correctly on the array
	 type.  */
      else if (TREE_CODE (type) == ARRAY_TYPE)
	layout_type (type);

      if (TREE_STATIC (decl)
	  && !at_function_scope_p ()
	  && current_function_decl == NULL)
	/* So decl is a global variable or a static member of a
	   non local class.  Record the types it uses
	   so that we can decide later to emit debug info for them.  */
	record_types_used_by_current_var_decl (decl);
    }

  /* Add this declaration to the statement-tree.  This needs to happen
     after the call to check_initializer so that the DECL_EXPR for a
     reference temp is added before the DECL_EXPR for the reference itself.  */
  if (DECL_FUNCTION_SCOPE_P (decl))
    {
      /* If we're building a variable sized type, and we might be
	 reachable other than via the top of the current binding
	 level, then create a new BIND_EXPR so that we deallocate
	 the object at the right time.  */
      if (VAR_P (decl)
	  && DECL_SIZE (decl)
	  && !TREE_CONSTANT (DECL_SIZE (decl))
	  && STATEMENT_LIST_HAS_LABEL (cur_stmt_list))
	{
	  tree bind;
	  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
	  TREE_SIDE_EFFECTS (bind) = 1;
	  add_stmt (bind);
	  BIND_EXPR_BODY (bind) = push_stmt_list ();
	}
      add_decl_expr (decl);
    }

  /* Let the middle end know about variables and functions -- but not
     static data members in uninstantiated class templates.  */
  if (VAR_OR_FUNCTION_DECL_P (decl))
    {
      if (VAR_P (decl))
	{
	  layout_var_decl (decl);
	  if (!flag_weak)
	    /* Check again now that we have an initializer.  */
	    maybe_commonize_var (decl);
	}

      if (var_definition_p
	  /* With -fmerge-all-constants, gimplify_init_constructor
	     might add TREE_STATIC to the variable.  */
	  && (TREE_STATIC (decl) || flag_merge_constants >= 2))
	{
	  /* If a TREE_READONLY variable needs initialization
	     at runtime, it is no longer readonly and we need to
	     avoid MEM_READONLY_P being set on RTL created for it.  */
	  if (init)
	    {
	      if (TREE_READONLY (decl))
		TREE_READONLY (decl) = 0;
	      was_readonly = 0;
	    }
	  else if (was_readonly)
	    TREE_READONLY (decl) = 1;

	  /* Likewise if it needs destruction.  */
	  if (!decl_maybe_constant_destruction (decl, type))
	    TREE_READONLY (decl) = 0;
	}

      make_rtl_for_nonlocal_decl (decl, init, asmspec);

      /* Check for abstractness of the type. Notice that there is no
	 need to strip array types here since the check for those types
	 is already done within create_array_type_for_decl.  */
      abstract_virtuals_error (decl, type);

      if (TREE_TYPE (decl) == error_mark_node)
	/* No initialization required.  */
	;
      else if (TREE_CODE (decl) == FUNCTION_DECL)
	{
	  if (init)
	    {
	      if (init == ridpointers[(int)RID_DEFAULT])
		{
		  /* An out-of-class default definition is defined at
		     the point where it is explicitly defaulted.  */
		  if (DECL_DELETED_FN (decl))
		    maybe_explain_implicit_delete (decl);
		  else if (DECL_INITIAL (decl) == error_mark_node)
		    synthesize_method (decl);
		}
	      else
		error_at (cp_expr_loc_or_loc (init,
					      DECL_SOURCE_LOCATION (decl)),
			  "function %q#D is initialized like a variable",
			  decl);
	    }
	  /* else no initialization required.  */
	}
      else if (DECL_EXTERNAL (decl)
	       && ! (DECL_LANG_SPECIFIC (decl)
		     && DECL_NOT_REALLY_EXTERN (decl)))
	{
	  /* check_initializer will have done any constant initialization.  */
	}
      /* A variable definition.  */
      else if (DECL_FUNCTION_SCOPE_P (decl) && !TREE_STATIC (decl))
	/* Initialize the local variable.  */
	initialize_local_var (decl, init);

      /* If a variable is defined, and then a subsequent
	 definition with external linkage is encountered, we will
	 get here twice for the same variable.  We want to avoid
	 calling expand_static_init more than once.  For variables
	 that are not static data members, we can call
	 expand_static_init only when we actually process the
	 initializer.  It is not legal to redeclare a static data
	 member, so this issue does not arise in that case.  */
      else if (var_definition_p && TREE_STATIC (decl))
	expand_static_init (decl, init);
    }

  /* If a CLEANUP_STMT was created to destroy a temporary bound to a
     reference, insert it in the statement-tree now.  */
  if (cleanups)
    {
      unsigned i;
      tree t;
      FOR_EACH_VEC_ELT (*cleanups, i, t)
	push_cleanup (decl, t, false);
      release_tree_vector (cleanups);
    }

  if (was_readonly)
    TREE_READONLY (decl) = 1;

  if (flag_openmp
      && VAR_P (decl)
      && lookup_attribute ("omp declare target implicit",
			   DECL_ATTRIBUTES (decl)))
    {
      DECL_ATTRIBUTES (decl)
	= remove_attribute ("omp declare target implicit",
			    DECL_ATTRIBUTES (decl));
      complete_type (TREE_TYPE (decl));
      if (!cp_omp_mappable_type (TREE_TYPE (decl)))
	{
	  error ("%q+D in declare target directive does not have mappable"
		 " type", decl);
	  cp_omp_emit_unmappable_type_notes (TREE_TYPE (decl));
	}
      else if (!lookup_attribute ("omp declare target",
				  DECL_ATTRIBUTES (decl))
	       && !lookup_attribute ("omp declare target link",
				     DECL_ATTRIBUTES (decl)))
	DECL_ATTRIBUTES (decl)
	  = tree_cons (get_identifier ("omp declare target"),
		       NULL_TREE, DECL_ATTRIBUTES (decl));
    }

  /* This is the last point we can lower alignment so give the target the
     chance to do so.  */
  if (VAR_P (decl)
      && !is_global_var (decl)
      && !DECL_HARD_REGISTER (decl))
    targetm.lower_local_decl_alignment (decl);

  invoke_plugin_callbacks (PLUGIN_FINISH_DECL, decl);
}

/* For class TYPE return itself or some of its bases that contain
   any direct non-static data members.  Return error_mark_node if an
   error has been diagnosed.
*/

static tree
find_decomp_class_base (location_t loc, tree type, tree ret)
{
  bool member_seen = false;
  /* First scan TYPE's own direct non-static data members.  RET non-null
     means a base seen earlier in the recursion already contributed
     members; a member here as well is a conflict the caller reports.  */
  for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
    if (TREE_CODE (field) != FIELD_DECL
	|| DECL_ARTIFICIAL (field)
	|| DECL_UNNAMED_BIT_FIELD (field))
      continue;
    else if (ret)
      return type;
    else if (ANON_AGGR_TYPE_P (TREE_TYPE (field)))
      {
	/* Anonymous aggregates cannot be decomposed.  */
	if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
	  error_at (loc, "cannot decompose class type %qT because it has an "
			 "anonymous struct member", type);
	else
	  error_at (loc, "cannot decompose class type %qT because it has an "
			 "anonymous union member", type);
	inform (DECL_SOURCE_LOCATION (field), "declared here");
	return error_mark_node;
      }
    else if (!accessible_p (type, field, true))
      {
	error_at (loc, "cannot decompose inaccessible member %qD of %qT",
		  field, type);
	inform (DECL_SOURCE_LOCATION (field),
		TREE_PRIVATE (field)
		? G_("declared private here")
		: G_("declared protected here"));
	return error_mark_node;
      }
    else
      member_seen = true;

  tree base_binfo, binfo;
  tree orig_ret = ret;
  int i;
  if (member_seen)
    ret = type;
  /* Now recurse into the direct bases; at most one of TYPE and its
     bases may contribute non-static data members.  */
  for (binfo = TYPE_BINFO (type), i = 0;
       BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
    {
      tree t = find_decomp_class_base (loc, TREE_TYPE (base_binfo), ret);
      if (t == error_mark_node)
	return error_mark_node;
      if (t != NULL_TREE && t != ret)
	{
	  if (ret == type)
	    {
	      error_at (loc, "cannot decompose class type %qT: both it and "
			     "its base class %qT have non-static data members",
			type, t);
	      return error_mark_node;
	    }
	  else if (orig_ret != NULL_TREE)
	    return t;
	  else if (ret != NULL_TREE)
	    {
	      error_at (loc, "cannot decompose class type %qT: its base "
			     "classes %qT and %qT have non-static data "
			     "members", type, ret, t);
	      return error_mark_node;
	    }
	  else
	    ret = t;
	}
    }
  return ret;
}

/* Return std::tuple_size<TYPE>::value.
*/

static tree
get_tuple_size (tree type)
{
  tree args = make_tree_vec (1);
  TREE_VEC_ELT (args, 0) = type;
  /* Instantiate std::tuple_size<TYPE> quietly (tf_none); an
     incomplete instantiation means TYPE is not tuple-like.  */
  tree inst = lookup_template_class (tuple_size_identifier, args,
				     /*in_decl*/NULL_TREE,
				     /*context*/std_node,
				     /*entering_scope*/false, tf_none);
  inst = complete_type (inst);
  if (inst == error_mark_node
      || !COMPLETE_TYPE_P (inst))
    /* NULL_TREE: no tuple_size specialization, not tuple-like.  */
    return NULL_TREE;
  tree val = lookup_qualified_name (inst, value_identifier,
				    LOOK_want::NORMAL, /*complain*/false);
  if (TREE_CODE (val) == VAR_DECL || TREE_CODE (val) == CONST_DECL)
    val = maybe_constant_value (val);
  if (TREE_CODE (val) == INTEGER_CST)
    return val;
  else
    /* error_mark_node: tuple_size exists but ::value is not an
       integral constant expression.  */
    return error_mark_node;
}

/* Return std::tuple_element<I,TYPE>::type.  */

static tree
get_tuple_element_type (tree type, unsigned i)
{
  tree args = make_tree_vec (2);
  TREE_VEC_ELT (args, 0) = build_int_cst (integer_type_node, i);
  TREE_VEC_ELT (args, 1) = type;
  tree inst = lookup_template_class (tuple_element_identifier, args,
				     /*in_decl*/NULL_TREE,
				     /*context*/std_node,
				     /*entering_scope*/false,
				     tf_warning_or_error);
  return make_typename_type (inst, type_identifier,
			     none_type, tf_warning_or_error);
}

/* Return e.get<i>() or get<i>(e).  */

static tree
get_tuple_decomp_init (tree decl, unsigned i)
{
  tree targs = make_tree_vec (1);
  TREE_VEC_ELT (targs, 0) = build_int_cst (integer_type_node, i);

  tree etype = TREE_TYPE (decl);
  tree e = convert_from_reference (decl);

  /* [The id-expression] e is an lvalue if the type of the entity e is an
     lvalue reference and an xvalue otherwise.  */
  if (!TYPE_REF_P (etype)
      || TYPE_REF_IS_RVALUE (etype))
    e = move (e);

  tree fns = lookup_qualified_name (TREE_TYPE (e), get__identifier,
				    LOOK_want::NORMAL, /*complain*/false);
  bool use_member_get = false;

  /* To use a member get, member lookup must find at least one
     declaration that is a function template whose first template
     parameter is a non-type parameter.  */
  for (lkp_iterator iter (MAYBE_BASELINK_FUNCTIONS (fns)); iter; ++iter)
    {
      tree fn = *iter;
      if (TREE_CODE (fn) == TEMPLATE_DECL)
	{
	  tree tparms = DECL_TEMPLATE_PARMS (fn);
	  tree parm = TREE_VEC_ELT (INNERMOST_TEMPLATE_PARMS (tparms), 0);
	  if (TREE_CODE (TREE_VALUE (parm)) == PARM_DECL)
	    {
	      use_member_get = true;
	      break;
	    }
	}
    }

  if (use_member_get)
    {
      /* e.get<i>()  */
      fns = lookup_template_function (fns, targs);
      return build_new_method_call (e, fns, /*args*/NULL,
				    /*path*/NULL_TREE, LOOKUP_NORMAL,
				    /*fn_p*/NULL, tf_warning_or_error);
    }
  else
    {
      /* get<i>(e), found by argument-dependent lookup.  */
      releasing_vec args (make_tree_vector_single (e));
      fns = lookup_template_function (get__identifier, targs);
      fns = perform_koenig_lookup (fns, args, tf_warning_or_error);
      return finish_call_expr (fns, &args, /*novirt*/false,
			       /*koenig*/true, tf_warning_or_error);
    }
}

/* It's impossible to recover the decltype of a tuple decomposition
   variable based on the actual type of the variable, so store it
   in a hash table.  */

static GTY((cache)) decl_tree_cache_map *decomp_type_table;

/* Return the stored decltype for tuple decomposition variable V.
   V must have been entered by cp_finish_decomp's tuple path.  */

tree
lookup_decomp_type (tree v)
{
  return *decomp_type_table->get (v);
}

/* Mangle a decomposition declaration if needed.  Arguments like
   in cp_finish_decomp.  */

void
cp_maybe_mangle_decomp (tree decl, tree first, unsigned int count)
{
  if (!processing_template_decl
      && !error_operand_p (decl)
      && TREE_STATIC (decl))
    {
      auto_vec<tree, 16> v;
      v.safe_grow (count, true);
      tree d = first;
      /* FIRST chains the individual bindings in reverse order;
	 undo that so V is in declaration order for mangling.  */
      for (unsigned int i = 0; i < count; i++, d = DECL_CHAIN (d))
	v[count - i - 1] = d;
      SET_DECL_ASSEMBLER_NAME (decl, mangle_decomp (decl, v));
      maybe_apply_pragma_weak (decl);
    }
}

/* Finish a decomposition declaration.  DECL is the underlying
   declaration "e", FIRST is the head of a chain of decls for the
   individual identifiers chained through DECL_CHAIN in reverse
   order and COUNT is the number of those decls.
*/

void
cp_finish_decomp (tree decl, tree first, unsigned int count)
{
  if (error_operand_p (decl))
    {
    error_out:
      /* Poison all the bindings and clear any value-exprs already
	 attached so later passes don't touch them.  */
      while (count--)
	{
	  TREE_TYPE (first) = error_mark_node;
	  if (DECL_HAS_VALUE_EXPR_P (first))
	    {
	      SET_DECL_VALUE_EXPR (first, NULL_TREE);
	      DECL_HAS_VALUE_EXPR_P (first) = 0;
	    }
	  first = DECL_CHAIN (first);
	}
      if (DECL_P (decl) && DECL_NAMESPACE_SCOPE_P (decl))
	SET_DECL_ASSEMBLER_NAME (decl, get_identifier ("<decomp>"));
      return;
    }

  location_t loc = DECL_SOURCE_LOCATION (decl);
  if (type_dependent_expression_p (decl)
      /* This happens for range for when not in templates.
	 Still add the DECL_VALUE_EXPRs for later processing.  */
      || (!processing_template_decl
	  && type_uses_auto (TREE_TYPE (decl))))
    {
      for (unsigned int i = 0; i < count; i++)
	{
	  if (!DECL_HAS_VALUE_EXPR_P (first))
	    {
	      tree v = build_nt (ARRAY_REF, decl, size_int (count - i - 1),
				 NULL_TREE, NULL_TREE);
	      SET_DECL_VALUE_EXPR (first, v);
	      DECL_HAS_VALUE_EXPR_P (first) = 1;
	    }
	  if (processing_template_decl)
	    fit_decomposition_lang_decl (first, decl);
	  first = DECL_CHAIN (first);
	}
      return;
    }

  /* V holds the bindings in declaration order (FIRST is reversed).  */
  auto_vec<tree, 16> v;
  v.safe_grow (count, true);
  tree d = first;
  for (unsigned int i = 0; i < count; i++, d = DECL_CHAIN (d))
    {
      v[count - i - 1] = d;
      fit_decomposition_lang_decl (d, decl);
    }

  tree type = TREE_TYPE (decl);
  /* DEXP is the expression designating the decomposed object "e".  */
  tree dexp = decl;

  if (TYPE_REF_P (type))
    {
      dexp = convert_from_reference (dexp);
      type = complete_type (TREE_TYPE (type));
      if (type == error_mark_node)
	goto error_out;
      if (!COMPLETE_TYPE_P (type))
	{
	  error_at (loc, "structured binding refers to incomplete type %qT",
		    type);
	  goto error_out;
	}
    }

  tree eltype = NULL_TREE;
  unsigned HOST_WIDE_INT eltscnt = 0;
  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      tree nelts;
      nelts = array_type_nelts_top (type);
      if (nelts == error_mark_node)
	goto error_out;
      if (!tree_fits_uhwi_p (nelts))
	{
	  error_at (loc, "cannot decompose variable length array %qT", type);
	  goto error_out;
	}
      eltscnt = tree_to_uhwi (nelts);
      if (count != eltscnt)
	{
	  /* Shared diagnostic for all the per-kind count checks.  */
	cnt_mismatch:
	  if (count > eltscnt)
	    error_n (loc, count,
		     "%u name provided for structured binding",
		     "%u names provided for structured binding", count);
	  else
	    error_n (loc, count,
		     "only %u name provided for structured binding",
		     "only %u names provided for structured binding", count);
	  inform_n (loc, eltscnt,
		    "while %qT decomposes into %wu element",
		    "while %qT decomposes into %wu elements",
		    type, eltscnt);
	  goto error_out;
	}
      eltype = TREE_TYPE (type);
      for (unsigned int i = 0; i < count; i++)
	{
	  TREE_TYPE (v[i]) = eltype;
	  layout_decl (v[i], 0);
	  if (processing_template_decl)
	    continue;
	  /* Each binding becomes a proxy for dexp[i].  */
	  tree t = unshare_expr (dexp);
	  t = build4_loc (DECL_SOURCE_LOCATION (v[i]), ARRAY_REF,
			  eltype, t, size_int (i), NULL_TREE, NULL_TREE);
	  SET_DECL_VALUE_EXPR (v[i], t);
	  DECL_HAS_VALUE_EXPR_P (v[i]) = 1;
	}
    }
  /* 2 GNU extensions.  */
  else if (TREE_CODE (type) == COMPLEX_TYPE)
    {
      eltscnt = 2;
      if (count != eltscnt)
	goto cnt_mismatch;
      eltype = cp_build_qualified_type (TREE_TYPE (type), TYPE_QUALS (type));
      for (unsigned int i = 0; i < count; i++)
	{
	  TREE_TYPE (v[i]) = eltype;
	  layout_decl (v[i], 0);
	  if (processing_template_decl)
	    continue;
	  /* First binding is the real part, second the imaginary part.  */
	  tree t = unshare_expr (dexp);
	  t = build1_loc (DECL_SOURCE_LOCATION (v[i]),
			  i ? IMAGPART_EXPR : REALPART_EXPR,
			  eltype, t);
	  SET_DECL_VALUE_EXPR (v[i], t);
	  DECL_HAS_VALUE_EXPR_P (v[i]) = 1;
	}
    }
  else if (TREE_CODE (type) == VECTOR_TYPE)
    {
      if (!TYPE_VECTOR_SUBPARTS (type).is_constant (&eltscnt))
	{
	  error_at (loc, "cannot decompose variable length vector %qT", type);
	  goto error_out;
	}
      if (count != eltscnt)
	goto cnt_mismatch;
      eltype = cp_build_qualified_type (TREE_TYPE (type), TYPE_QUALS (type));
      for (unsigned int i = 0; i < count; i++)
	{
	  TREE_TYPE (v[i]) = eltype;
	  layout_decl (v[i], 0);
	  if (processing_template_decl)
	    continue;
	  tree t = unshare_expr (dexp);
	  convert_vector_to_array_for_subscript (DECL_SOURCE_LOCATION (v[i]),
						 &t, size_int (i));
	  t = build4_loc (DECL_SOURCE_LOCATION (v[i]), ARRAY_REF,
			  eltype, t, size_int (i), NULL_TREE, NULL_TREE);
	  SET_DECL_VALUE_EXPR (v[i], t);
	  DECL_HAS_VALUE_EXPR_P (v[i]) = 1;
	}
    }
  else if (tree tsize = get_tuple_size (type))
    {
      /* Tuple-like decomposition: each binding is a real variable
	 initialized from e.get<i>() or get<i>(e).  */
      if (tsize == error_mark_node)
	{
	  error_at (loc, "%<std::tuple_size<%T>::value%> is not an integral "
			 "constant expression", type);
	  goto error_out;
	}
      if (!tree_fits_uhwi_p (tsize))
	{
	  error_n (loc, count,
		   "%u name provided for structured binding",
		   "%u names provided for structured binding", count);
	  inform (loc, "while %qT decomposes into %E elements",
		  type, tsize);
	  goto error_out;
	}
      eltscnt = tree_to_uhwi (tsize);
      if (count != eltscnt)
	goto cnt_mismatch;
      int save_read = DECL_READ_P (decl);
      for (unsigned i = 0; i < count; ++i)
	{
	  location_t sloc = input_location;
	  location_t dloc = DECL_SOURCE_LOCATION (v[i]);

	  /* Point diagnostics during the get<i> lookup at the
	     individual binding, then restore.  */
	  input_location = dloc;
	  tree init = get_tuple_decomp_init (decl, i);
	  tree eltype = (init == error_mark_node ? error_mark_node
			 : get_tuple_element_type (type, i));
	  input_location = sloc;

	  if (init == error_mark_node || eltype == error_mark_node)
	    {
	      inform (dloc, "in initialization of structured binding "
		      "variable %qD", v[i]);
	      goto error_out;
	    }
	  /* Save the decltype away before reference collapse.  */
	  hash_map_safe_put<hm_ggc> (decomp_type_table, v[i], eltype);
	  eltype = cp_build_reference_type (eltype, !lvalue_p (init));
	  TREE_TYPE (v[i]) = eltype;
	  layout_decl (v[i], 0);
	  if (DECL_HAS_VALUE_EXPR_P (v[i]))
	    {
	      /* In this case the names are variables, not just proxies.  */
	      SET_DECL_VALUE_EXPR (v[i], NULL_TREE);
	      DECL_HAS_VALUE_EXPR_P (v[i]) = 0;
	    }
	  if (!processing_template_decl)
	    {
	      copy_linkage (v[i], decl);
	      cp_finish_decl (v[i], init, /*constexpr*/false,
			      /*asm*/NULL_TREE, LOOKUP_NORMAL);
	    }
	}
      /* Ignore reads from the underlying decl performed during
	 initialization of the individual variables.  If those will be
	 read, we'll mark the underlying decl as read at that point.  */
      DECL_READ_P (decl) = save_read;
    }
  else if (TREE_CODE (type) == UNION_TYPE)
    {
      error_at (loc, "cannot decompose union type %qT", type);
      goto error_out;
    }
  else if (!CLASS_TYPE_P (type))
    {
      error_at (loc, "cannot decompose non-array non-class type %qT", type);
      goto error_out;
    }
  else if (LAMBDA_TYPE_P (type))
    {
      error_at (loc, "cannot decompose lambda closure type %qT", type);
      goto error_out;
    }
  else if (processing_template_decl && complete_type (type) == error_mark_node)
    goto error_out;
  else if (processing_template_decl && !COMPLETE_TYPE_P (type))
    pedwarn (loc, 0, "structured binding refers to incomplete class type %qT",
	     type);
  else
    {
      /* Plain class: bindings are proxies for the non-static data
	 members of TYPE or of exactly one of its bases.  */
      tree btype = find_decomp_class_base (loc, type, NULL_TREE);
      if (btype == error_mark_node)
	goto error_out;
      else if (btype == NULL_TREE)
	{
	  error_at (loc, "cannot decompose class type %qT without non-static "
			 "data members", type);
	  goto error_out;
	}
      for (tree field = TYPE_FIELDS (btype); field; field = TREE_CHAIN (field))
	if (TREE_CODE (field) != FIELD_DECL
	    || DECL_ARTIFICIAL (field)
	    || DECL_UNNAMED_BIT_FIELD (field))
	  continue;
	else
	  eltscnt++;
      if (count != eltscnt)
	goto cnt_mismatch;
      tree t = dexp;
      if (type != btype)
	{
	  t = convert_to_base (t, btype, /*check_access*/true,
			       /*nonnull*/false, tf_warning_or_error);
	  type = btype;
	}
      unsigned int i = 0;
      for (tree field = TYPE_FIELDS (btype); field; field = TREE_CHAIN (field))
	if (TREE_CODE (field) != FIELD_DECL
	    || DECL_ARTIFICIAL (field)
	    || DECL_UNNAMED_BIT_FIELD (field))
	  continue;
	else
	  {
	    tree tt = finish_non_static_data_member (field, unshare_expr (t),
						     NULL_TREE);
	    if (REFERENCE_REF_P (tt))
	      tt = TREE_OPERAND (tt, 0);
	    TREE_TYPE (v[i]) = TREE_TYPE (tt);
	    layout_decl (v[i], 0);
	    if (!processing_template_decl)
	      {
		SET_DECL_VALUE_EXPR (v[i], tt);
		DECL_HAS_VALUE_EXPR_P (v[i]) = 1;
	      }
	    i++;
	  }
    }
  if (processing_template_decl)
    {
      /* In a template, give any binding still lacking a value-expr a
	 placeholder ARRAY_REF for substitution to process later.  */
      for (unsigned int i = 0; i < count; i++)
	if (!DECL_HAS_VALUE_EXPR_P (v[i]))
	  {
	    tree a = build_nt (ARRAY_REF, decl, size_int (i),
			       NULL_TREE, NULL_TREE);
	    SET_DECL_VALUE_EXPR (v[i], a);
	    DECL_HAS_VALUE_EXPR_P (v[i]) = 1;
	  }
    }
}

/* Returns a declaration for a VAR_DECL as if:

     extern "C" TYPE NAME;

   had been seen.  Used to create compiler-generated global
   variables.  */

static tree
declare_global_var (tree name, tree type)
{
  tree decl;

  push_to_top_level ();
  decl = build_decl (input_location, VAR_DECL, name, type);
  TREE_PUBLIC (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_CONTEXT (decl) = FROB_CONTEXT (global_namespace);
  /* If the user has explicitly declared this variable (perhaps
     because the code we are compiling is part of a low-level runtime
     library), then it is possible that our declaration will be merged
     with theirs by pushdecl.  */
  decl = pushdecl (decl);
  cp_finish_decl (decl, NULL_TREE, false, NULL_TREE, 0);
  pop_from_top_level ();

  return decl;
}

/* Returns the type for the argument to "__cxa_atexit" (or "atexit",
   if "__cxa_atexit" is not being used) corresponding to the function
   to be called when the program exits.  */

static tree
get_atexit_fn_ptr_type (void)
{
  tree fn_type;

  /* The result is cached in atexit_fn_ptr_type_node.  */
  if (!atexit_fn_ptr_type_node)
    {
      tree arg_type;
      if (flag_use_cxa_atexit
	  && !targetm.cxx.use_atexit_for_cxa_atexit ())
	/* The parameter to "__cxa_atexit" is "void (*)(void *)".  */
	arg_type = ptr_type_node;
      else
	/* The parameter to "atexit" is "void (*)(void)".
*/
	arg_type = NULL_TREE;
      fn_type = build_function_type_list (void_type_node,
					  arg_type, NULL_TREE);
      atexit_fn_ptr_type_node = build_pointer_type (fn_type);
    }

  return atexit_fn_ptr_type_node;
}

/* Returns a pointer to the `atexit' function.  Note that if
   FLAG_USE_CXA_ATEXIT is nonzero, then this will actually be the new
   `__cxa_atexit' function specified in the IA64 C++ ABI.  The result
   is cached in atexit_node.  */

static tree
get_atexit_node (void)
{
  tree atexit_fndecl;
  tree fn_type;
  tree fn_ptr_type;
  const char *name;
  bool use_aeabi_atexit;

  if (atexit_node)
    return atexit_node;

  if (flag_use_cxa_atexit
      && !targetm.cxx.use_atexit_for_cxa_atexit ())
    {
      /* The declaration for `__cxa_atexit' is:

	   int __cxa_atexit (void (*)(void *), void *, void *)

	 We build up the argument types and then the function type
	 itself.  */
      tree argtype0, argtype1, argtype2;

      use_aeabi_atexit = targetm.cxx.use_aeabi_atexit ();
      /* First, build the pointer-to-function type for the first
	 argument.  */
      fn_ptr_type = get_atexit_fn_ptr_type ();
      /* Then, build the rest of the argument types.  Note the ARM
	 EABI variant takes the object first and the function second.  */
      argtype2 = ptr_type_node;
      if (use_aeabi_atexit)
	{
	  argtype1 = fn_ptr_type;
	  argtype0 = ptr_type_node;
	}
      else
	{
	  argtype1 = ptr_type_node;
	  argtype0 = fn_ptr_type;
	}
      /* And the final __cxa_atexit type.  */
      fn_type = build_function_type_list (integer_type_node,
					  argtype0, argtype1, argtype2,
					  NULL_TREE);
      if (use_aeabi_atexit)
	name = "__aeabi_atexit";
      else
	name = "__cxa_atexit";
    }
  else
    {
      /* The declaration for `atexit' is:

	   int atexit (void (*)());

	 We build up the argument types and then the function type
	 itself.  */
      fn_ptr_type = get_atexit_fn_ptr_type ();
      /* Build the final atexit type.  */
      fn_type = build_function_type_list (integer_type_node,
					  fn_ptr_type, NULL_TREE);
      name = "atexit";
    }

  /* Now, build the function declaration.  */
  push_lang_context (lang_name_c);
  atexit_fndecl = build_library_fn_ptr (name, fn_type, ECF_LEAF | ECF_NOTHROW);
  mark_used (atexit_fndecl);
  pop_lang_context ();
  atexit_node = decay_conversion (atexit_fndecl, tf_warning_or_error);

  return atexit_node;
}

/* Like get_atexit_node, but for thread-local cleanups.  */

static tree
get_thread_atexit_node (void)
{
  /* The declaration for `__cxa_thread_atexit' is:

       int __cxa_thread_atexit (void (*)(void *), void *, void *) */
  tree fn_type = build_function_type_list (integer_type_node,
					   get_atexit_fn_ptr_type (),
					   ptr_type_node, ptr_type_node,
					   NULL_TREE);

  /* Now, build the function declaration.  */
  tree atexit_fndecl = build_library_fn_ptr ("__cxa_thread_atexit", fn_type,
					     ECF_LEAF | ECF_NOTHROW);
  return decay_conversion (atexit_fndecl, tf_warning_or_error);
}

/* Returns the __dso_handle VAR_DECL.  */

static tree
get_dso_handle_node (void)
{
  if (dso_handle_node)
    return dso_handle_node;

  /* Declare the variable.  */
  dso_handle_node = declare_global_var (get_identifier ("__dso_handle"),
					ptr_type_node);

#ifdef HAVE_GAS_HIDDEN
  if (dso_handle_node != error_mark_node)
    {
      DECL_VISIBILITY (dso_handle_node) = VISIBILITY_HIDDEN;
      DECL_VISIBILITY_SPECIFIED (dso_handle_node) = 1;
    }
#endif

  return dso_handle_node;
}

/* Begin a new function with internal linkage whose job will be simply
   to destroy some particular variable.  */

static GTY(()) int start_cleanup_cnt;

static tree
start_cleanup_fn (void)
{
  char name[32];
  tree fntype;
  tree fndecl;
  bool use_cxa_atexit = flag_use_cxa_atexit
			&& !targetm.cxx.use_atexit_for_cxa_atexit ();

  push_to_top_level ();

  /* No need to mangle this.  */
  push_lang_context (lang_name_c);

  /* Build the name of the function.  */
  sprintf (name, "__tcf_%d", start_cleanup_cnt++);
  /* Build the function declaration.  */
  fntype = TREE_TYPE (get_atexit_fn_ptr_type ());
  fndecl = build_lang_decl (FUNCTION_DECL, get_identifier (name), fntype);
  /* It's a function with internal linkage, generated by the
     compiler.
*/
  TREE_PUBLIC (fndecl) = 0;
  DECL_ARTIFICIAL (fndecl) = 1;
  /* Make the function `inline' so that it is only emitted if it is
     actually needed.  It is unlikely that it will be inlined, since
     it is only called via a function pointer, but we avoid unnecessary
     emissions this way.  */
  DECL_DECLARED_INLINE_P (fndecl) = 1;
  DECL_INTERFACE_KNOWN (fndecl) = 1;
  /* Build the parameter.  */
  if (use_cxa_atexit)
    {
      tree parmdecl = cp_build_parm_decl (fndecl, NULL_TREE, ptr_type_node);
      TREE_USED (parmdecl) = 1;
      DECL_READ_P (parmdecl) = 1;
      DECL_ARGUMENTS (fndecl) = parmdecl;
    }

  pushdecl (fndecl);
  start_preparsed_function (fndecl, NULL_TREE, SF_PRE_PARSED);

  pop_lang_context ();

  return current_function_decl;
}

/* Finish the cleanup function begun by start_cleanup_fn.  */

static void
end_cleanup_fn (void)
{
  expand_or_defer_fn (finish_function (/*inline_p=*/false));

  pop_from_top_level ();
}

/* Generate code to handle the destruction of DECL, an object with
   static storage duration.  Returns the expression registering the
   cleanup, or void_node if no registration is needed.  */

tree
register_dtor_fn (tree decl)
{
  tree cleanup;
  tree addr;
  tree compound_stmt;
  tree fcall;
  tree type;
  bool ob_parm, dso_parm, use_dtor;
  tree arg0, arg1, arg2;
  tree atex_node;

  type = TREE_TYPE (decl);
  if (TYPE_HAS_TRIVIAL_DESTRUCTOR (type))
    return void_node;

  if (decl_maybe_constant_destruction (decl, type)
      && DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl))
    {
      /* Destruction is constant-evaluated; just make sure the
	 destructor is callable and register nothing.  */
      cxx_maybe_build_cleanup (decl, tf_warning_or_error);
      return void_node;
    }

  /* If we're using "__cxa_atexit" (or "__cxa_thread_atexit" or
     "__aeabi_atexit"), and DECL is a class object, we can just pass the
     destructor to "__cxa_atexit"; we don't have to build a temporary
     function to do the cleanup.  */
  dso_parm = (flag_use_cxa_atexit
	      && !targetm.cxx.use_atexit_for_cxa_atexit ());
  ob_parm = (CP_DECL_THREAD_LOCAL_P (decl) || dso_parm);
  use_dtor = ob_parm && CLASS_TYPE_P (type);
  if (use_dtor)
    {
      cleanup = get_class_binding (type, complete_dtor_identifier);

      /* Make sure it is accessible.  */
      perform_or_defer_access_check (TYPE_BINFO (type), cleanup, cleanup,
				     tf_warning_or_error);
    }
  else
    {
      /* Call build_cleanup before we enter the anonymous function so
	 that any access checks will be done relative to the current
	 scope, rather than the scope of the anonymous function.  */
      build_cleanup (decl);

      /* Now start the function.  */
      cleanup = start_cleanup_fn ();

      /* Now, recompute the cleanup.  It may contain SAVE_EXPRs that refer
	 to the original function, rather than the anonymous one.  That
	 will make the back end think that nested functions are in use,
	 which causes confusion.  */
      push_deferring_access_checks (dk_no_check);
      fcall = build_cleanup (decl);
      pop_deferring_access_checks ();

      /* Create the body of the anonymous function.  */
      compound_stmt = begin_compound_stmt (BCS_FN_BODY);
      finish_expr_stmt (fcall);
      finish_compound_stmt (compound_stmt);
      end_cleanup_fn ();
    }

  /* Call atexit with the cleanup function.  */
  mark_used (cleanup);
  cleanup = build_address (cleanup);

  if (CP_DECL_THREAD_LOCAL_P (decl))
    atex_node = get_thread_atexit_node ();
  else
    atex_node = get_atexit_node ();

  if (use_dtor)
    {
      /* We must convert CLEANUP to the type that "__cxa_atexit"
	 expects.  */
      cleanup = build_nop (get_atexit_fn_ptr_type (), cleanup);
      /* "__cxa_atexit" will pass the address of DECL to the
	 cleanup function.  */
      mark_used (decl);
      addr = build_address (decl);
      /* The declared type of the parameter to "__cxa_atexit" is
	 "void *".  For plain "T*", we could just let the
	 machinery in cp_build_function_call convert it -- but if the
	 type is "cv-qualified T *", then we need to convert it before
	 passing it in, to avoid spurious errors.  */
      addr = build_nop (ptr_type_node, addr);
    }
  else
    /* Since the cleanup functions we build ignore the address
       they're given, there's no reason to pass the actual address
       in, and, in general, it's cheaper to pass NULL than any
       other value.  */
    addr = null_pointer_node;

  if (dso_parm)
    arg2 = cp_build_addr_expr (get_dso_handle_node (),
			       tf_warning_or_error);
  else if (ob_parm)
    /* Just pass NULL to the dso handle parm if we don't actually
       have a DSO handle on this target.  */
    arg2 = null_pointer_node;
  else
    arg2 = NULL_TREE;

  if (ob_parm)
    {
      /* ARM EABI swaps the object and function arguments.  */
      if (!CP_DECL_THREAD_LOCAL_P (decl)
	  && targetm.cxx.use_aeabi_atexit ())
	{
	  arg1 = cleanup;
	  arg0 = addr;
	}
      else
	{
	  arg1 = addr;
	  arg0 = cleanup;
	}
    }
  else
    {
      arg0 = cleanup;
      arg1 = NULL_TREE;
    }
  return cp_build_function_call_nary (atex_node, tf_warning_or_error,
				      arg0, arg1, arg2, NULL_TREE);
}

/* DECL is a VAR_DECL with static storage duration.  INIT, if present,
   is its initializer.  Generate code to handle the construction
   and destruction of DECL.  */

static void
expand_static_init (tree decl, tree init)
{
  gcc_assert (VAR_P (decl));
  gcc_assert (TREE_STATIC (decl));

  /* Some variables require no dynamic initialization.  */
  if (decl_maybe_constant_destruction (decl, TREE_TYPE (decl)))
    {
      /* Make sure the destructor is callable.  */
      cxx_maybe_build_cleanup (decl, tf_warning_or_error);
      if (!init)
	return;
    }

  if (CP_DECL_THREAD_LOCAL_P (decl) && DECL_GNU_TLS_P (decl)
      && !DECL_FUNCTION_SCOPE_P (decl))
    {
      /* GNU __thread (as opposed to C++11 thread_local) does not
	 support dynamic initialization or non-trivial destruction.  */
      location_t dloc = DECL_SOURCE_LOCATION (decl);
      if (init)
	error_at (dloc, "non-local variable %qD declared %<__thread%> "
		  "needs dynamic initialization", decl);
      else
	error_at (dloc, "non-local variable %qD declared %<__thread%> "
		  "has a non-trivial destructor", decl);
      static bool informed;
      if (!informed)
	{
	  inform (dloc, "C++11 %<thread_local%> allows dynamic "
		  "initialization and destruction");
	  informed = true;
	}
      return;
    }

  if (DECL_FUNCTION_SCOPE_P (decl))
    {
      /* Emit code to perform this initialization but once.  */
      tree if_stmt = NULL_TREE, inner_if_stmt = NULL_TREE;
      tree then_clause = NULL_TREE, inner_then_clause = NULL_TREE;
      tree guard, guard_addr;
      tree flag, begin;
      /* We don't need thread-safety code for thread-local
	 vars.
*/
      bool thread_guard = (flag_threadsafe_statics
			   && !CP_DECL_THREAD_LOCAL_P (decl));

      /* Emit code to perform this initialization but once.  This code
	 looks like:

	   static <type> guard;
	   if (!__atomic_load (guard.first_byte)) {
	     if (__cxa_guard_acquire (&guard)) {
	       bool flag = false;
	       try {
		 // Do initialization.
		 flag = true; __cxa_guard_release (&guard);
		 // Register variable for destruction at end of program.
	       } catch {
		 if (!flag) __cxa_guard_abort (&guard);
	       }
	     }
	   }

	 Note that the `flag' variable is only set to 1 *after* the
	 initialization is complete.  This ensures that an exception,
	 thrown during the construction, will cause the variable to
	 reinitialized when we pass through this code again, as per:

	   [stmt.dcl]

	   If the initialization exits by throwing an exception, the
	   initialization is not complete, so it will be tried again
	   the next time control enters the declaration.

	 This process should be thread-safe, too; multiple threads
	 should not be able to initialize the variable more than
	 once.  */

      /* Create the guard variable.  */
      guard = get_guard (decl);

      /* Begin the conditional initialization.  */
      if_stmt = begin_if_stmt ();
      finish_if_stmt_cond (get_guard_cond (guard, thread_guard), if_stmt);
      then_clause = begin_compound_stmt (BCS_NO_SCOPE);

      if (thread_guard)
	{
	  tree vfntype = NULL_TREE;
	  tree acquire_name, release_name, abort_name;
	  tree acquire_fn, release_fn, abort_fn;
	  guard_addr = build_address (guard);

	  /* Look up (or declare) the __cxa_guard_* runtime entry
	     points.  */
	  acquire_name = get_identifier ("__cxa_guard_acquire");
	  release_name = get_identifier ("__cxa_guard_release");
	  abort_name = get_identifier ("__cxa_guard_abort");
	  acquire_fn = get_global_binding (acquire_name);
	  release_fn = get_global_binding (release_name);
	  abort_fn = get_global_binding (abort_name);
	  if (!acquire_fn)
	    acquire_fn = push_library_fn
	      (acquire_name, build_function_type_list (integer_type_node,
						       TREE_TYPE (guard_addr),
						       NULL_TREE),
	       NULL_TREE, ECF_NOTHROW);
	  if (!release_fn || !abort_fn)
	    vfntype = build_function_type_list (void_type_node,
						TREE_TYPE (guard_addr),
						NULL_TREE);
	  if (!release_fn)
	    release_fn = push_library_fn (release_name, vfntype, NULL_TREE,
					  ECF_NOTHROW);
	  if (!abort_fn)
	    abort_fn = push_library_fn (abort_name, vfntype, NULL_TREE,
					ECF_NOTHROW | ECF_LEAF);

	  inner_if_stmt = begin_if_stmt ();
	  finish_if_stmt_cond (build_call_n (acquire_fn, 1, guard_addr),
			       inner_if_stmt);

	  inner_then_clause = begin_compound_stmt (BCS_NO_SCOPE);
	  /* BEGIN's cleanup calls __cxa_guard_abort, but only on the
	     exception path (CLEANUP_EH_ONLY) and only if FLAG is still
	     false, i.e. initialization did not complete.  */
	  begin = get_target_expr (boolean_false_node);
	  flag = TARGET_EXPR_SLOT (begin);

	  TARGET_EXPR_CLEANUP (begin)
	    = build3 (COND_EXPR, void_type_node, flag,
		      void_node,
		      build_call_n (abort_fn, 1, guard_addr));
	  CLEANUP_EH_ONLY (begin) = 1;

	  /* Do the initialization itself.  */
	  init = add_stmt_to_compound (begin, init);
	  init = add_stmt_to_compound (init,
				       build2 (MODIFY_EXPR, void_type_node,
					       flag, boolean_true_node));
	  init = add_stmt_to_compound (init,
				       build_call_n (release_fn, 1,
						     guard_addr));
	}
      else
	init = add_stmt_to_compound (init, set_guard (guard));

      /* Use atexit to register a function for destroying this static
	 variable.  */
      init = add_stmt_to_compound (init, register_dtor_fn (decl));

      finish_expr_stmt (init);

      if (thread_guard)
	{
	  finish_compound_stmt (inner_then_clause);
	  finish_then_clause (inner_if_stmt);
	  finish_if_stmt (inner_if_stmt);
	}

      finish_compound_stmt (then_clause);
      finish_then_clause (if_stmt);
      finish_if_stmt (if_stmt);
    }
  else if (CP_DECL_THREAD_LOCAL_P (decl))
    tls_aggregates = tree_cons (init, decl, tls_aggregates);
  else
    static_aggregates = tree_cons (init, decl, static_aggregates);
}

/* Make TYPE a complete type based on INITIAL_VALUE.
   Return 0 if successful, 1 if INITIAL_VALUE can't be deciphered,
   2 if there was no information (in which case assume 0 if DO_DEFAULT),
   3 if the initializer list is empty (in pedantic mode).  */

int
cp_complete_array_type (tree *ptype, tree initial_value, bool do_default)
{
  int failure;
  tree type, elt_type;

  /* Don't get confused by a CONSTRUCTOR for some other type.  */
  if (initial_value && TREE_CODE (initial_value) == CONSTRUCTOR
      && !BRACE_ENCLOSED_INITIALIZER_P (initial_value)
      && TREE_CODE (TREE_TYPE (initial_value)) != ARRAY_TYPE)
    return 1;

  if (initial_value)
    {
      unsigned HOST_WIDE_INT i;
      tree value;

      /* An array of character type can be initialized from a
	 brace-enclosed string constant.

	 FIXME: this code is duplicated from reshape_init.
	 Probably we should just call reshape_init here?  */
      if (char_type_p (TYPE_MAIN_VARIANT (TREE_TYPE (*ptype)))
	  && TREE_CODE (initial_value) == CONSTRUCTOR
	  && !vec_safe_is_empty (CONSTRUCTOR_ELTS (initial_value)))
	{
	  vec<constructor_elt, va_gc> *v = CONSTRUCTOR_ELTS (initial_value);
	  tree value = (*v)[0].value;
	  STRIP_ANY_LOCATION_WRAPPER (value);

	  if (TREE_CODE (value) == STRING_CST
	      && v->length () == 1)
	    initial_value = value;
	}

      /* If any of the elements are parameter packs, we can't actually
	 complete this type now because the array size is dependent.  */
      if (TREE_CODE (initial_value) == CONSTRUCTOR)
	{
	  FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (initial_value),
				      i, value)
	    {
	      if (PACK_EXPANSION_P (value))
		return 0;
	    }
	}
    }

  failure = complete_array_type (ptype, initial_value, do_default);

  /* We can create the array before the element type is complete, which
     means that we didn't have these two bits set in the original type
     either.  In completing the type, we are expected to propagate these
     bits.  See also complete_type which does the same thing for arrays
     of fixed size.  */
  type = *ptype;
  if (type != error_mark_node && TYPE_DOMAIN (type))
    {
      elt_type = TREE_TYPE (type);
      TYPE_NEEDS_CONSTRUCTING (type) = TYPE_NEEDS_CONSTRUCTING (elt_type);
      TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type)
	= TYPE_HAS_NONTRIVIAL_DESTRUCTOR (elt_type);
    }

  return failure;
}

/* As above, but either give an error or reject zero-size arrays,
   depending on COMPLAIN.  */

int
cp_complete_array_type_or_error (tree *ptype, tree initial_value,
				 bool do_default, tsubst_flags_t complain)
{
  int failure;
  bool sfinae = !(complain & tf_error);
  /* In SFINAE context we can't be lenient about zero-size arrays.  */
  if (sfinae)
    ++pedantic;
  failure = cp_complete_array_type (ptype, initial_value, do_default);
  if (sfinae)
    --pedantic;
  if (failure)
    {
      if (sfinae)
	/* Not an error.  */;
      else if (failure == 1)
	error ("initializer fails to determine size of %qT", *ptype);
      else if (failure == 2)
	{
	  if (do_default)
	    error ("array size missing in %qT", *ptype);
	}
      else if (failure == 3)
	error ("zero-size array %qT", *ptype);
      *ptype = error_mark_node;
    }
  return failure;
}

/* Return zero if something is declared to be a member of type
   CTYPE when in the context of CUR_TYPE.  FLAGS says whether the
   member is a constructor or a destructor, for the error message.
   Otherwise, quietly return 1.
*/

static int
member_function_or_else (tree ctype, tree cur_type, enum overload_flags flags)
{
  if (ctype && ctype != cur_type)
    {
      if (flags == DTOR_FLAG)
	error ("destructor for alien class %qT cannot be a member", ctype);
      else
	error ("constructor for alien class %qT cannot be a member", ctype);
      return 0;
    }
  return 1;
}

/* Subroutine of `grokdeclarator'.  */

/* Generate errors possibly applicable for a given set of specifiers.
   This is for ARM $7.1.2.  OBJECT is the declared entity; TYPE says
   what kind of declaration it appears in (variable, parameter, type,
   or field); LOCATIONS gives the source positions of the offending
   specifiers for precise diagnostics.  */

static void
bad_specifiers (tree object,
		enum bad_spec_place type,
		int virtualp,
		int quals,
		int inlinep,
		int friendp,
		int raises,
		const location_t* locations)
{
  switch (type)
    {
      case BSP_VAR:
	if (virtualp)
	  error_at (locations[ds_virtual],
		    "%qD declared as a %<virtual%> variable", object);
	if (quals)
	  error ("%<const%> and %<volatile%> function specifiers on "
		 "%qD invalid in variable declaration", object);
	break;
      case BSP_PARM:
	if (virtualp)
	  error_at (locations[ds_virtual],
		    "%qD declared as a %<virtual%> parameter", object);
	if (inlinep)
	  error_at (locations[ds_inline],
		    "%qD declared as an %<inline%> parameter", object);
	if (quals)
	  error ("%<const%> and %<volatile%> function specifiers on "
		 "%qD invalid in parameter declaration", object);
	break;
      case BSP_TYPE:
	if (virtualp)
	  error_at (locations[ds_virtual],
		    "%qD declared as a %<virtual%> type", object);
	if (inlinep)
	  error_at (locations[ds_inline],
		    "%qD declared as an %<inline%> type", object);
	if (quals)
	  error ("%<const%> and %<volatile%> function specifiers on "
		 "%qD invalid in type declaration", object);
	break;
      case BSP_FIELD:
	if (virtualp)
	  error_at (locations[ds_virtual],
		    "%qD declared as a %<virtual%> field", object);
	if (inlinep)
	  error_at (locations[ds_inline],
		    "%qD declared as an %<inline%> field", object);
	if (quals)
	  error ("%<const%> and %<volatile%> function specifiers on "
		 "%qD invalid in field declaration", object);
	break;
      default:
	gcc_unreachable();
    }
  if (friendp)
    error ("%q+D declared as a friend", object);
  /* An exception specification is only rejected here when noexcept is
     not part of the type (pre-C++17 semantics) and OBJECT is neither a
     function pointer, function reference, nor pointer to member
     function type.  */
  if (raises && !flag_noexcept_type
      && (TREE_CODE (object) == TYPE_DECL
	  || (!TYPE_PTRFN_P (TREE_TYPE (object))
	      && !TYPE_REFFN_P (TREE_TYPE (object))
	      && !TYPE_PTRMEMFUNC_P (TREE_TYPE (object)))))
    error ("%q+D declared with an exception specification", object);
}

/* DECL is a member function or static data member and is presently
   being defined.  Check that the definition is taking place in a
   valid namespace.  */

static void
check_class_member_definition_namespace (tree decl)
{
  /* These checks only apply to member functions and static data
     members.  */
  gcc_assert (VAR_OR_FUNCTION_DECL_P (decl));
  /* We check for problems with specializations in pt.c in
     check_specialization_namespace, where we can issue better
     diagnostics.  */
  if (processing_specialization)
    return;
  /* We check this in check_explicit_instantiation_namespace.  */
  if (processing_explicit_instantiation)
    return;
  /* [class.mfct]

     A member function definition that appears outside of the
     class definition shall appear in a namespace scope enclosing
     the class definition.

     [class.static.data]

     The definition for a static data member shall appear in a
     namespace scope enclosing the member's class definition.  */
  if (!is_ancestor (current_namespace, DECL_CONTEXT (decl)))
    permerror (input_location,
	       "definition of %qD is not in namespace enclosing %qT",
	       decl, DECL_CONTEXT (decl));
}

/* Build a PARM_DECL for the "this" parameter of FN.  TYPE is the
   METHOD_TYPE for a non-static member function; QUALS are the
   cv-qualifiers that apply to the function.  */

tree
build_this_parm (tree fn, tree type, cp_cv_quals quals)
{
  tree this_type;
  tree qual_type;
  tree parm;
  cp_cv_quals this_quals;

  if (CLASS_TYPE_P (type))
    {
      /* `this' points at the member-function qualified class type;
	 `restrict' applies to the pointer itself, not the pointee,
	 so it is stripped here and re-applied below.  */
      this_type
	= cp_build_qualified_type (type, quals & ~TYPE_QUAL_RESTRICT);
      this_type = build_pointer_type (this_type);
    }
  else
    this_type = type_of_this_parm (type);
  /* The `this' parameter is implicitly `const'; it cannot be
     assigned to.
*/
  this_quals = (quals & TYPE_QUAL_RESTRICT) | TYPE_QUAL_CONST;
  qual_type = cp_build_qualified_type (this_type, this_quals);
  parm = build_artificial_parm (fn, this_identifier, qual_type);
  cp_apply_type_quals_to_decl (this_quals, parm);
  return parm;
}

/* DECL is a static member function.  Complain if it was declared
   with function-cv-quals.  */

static void
check_static_quals (tree decl, cp_cv_quals quals)
{
  if (quals != TYPE_UNQUALIFIED)
    error ("static member function %q#D declared with type qualifiers",
	   decl);
}

// Check that FN takes no arguments and returns bool.
static void
check_concept_fn (tree fn)
{
  // A constraint is nullary.
  if (DECL_ARGUMENTS (fn))
    error_at (DECL_SOURCE_LOCATION (fn),
	      "concept %q#D declared with function parameters", fn);

  // The declared return type of the concept shall be bool, and
  // it shall not be deduced from its definition.
  tree type = TREE_TYPE (TREE_TYPE (fn));
  if (is_auto (type))
    error_at (DECL_SOURCE_LOCATION (fn),
	      "concept %q#D declared with a deduced return type", fn);
  else if (type != boolean_type_node)
    error_at (DECL_SOURCE_LOCATION (fn),
	      "concept %q#D with non-%<bool%> return type %qT", fn, type);
}

/* Helper function.  Replace the temporary this parameter injected
   during cp_finish_omp_declare_simd with the real this parameter.
   Callback for walk_tree; DATA is the real `this' PARM_DECL.  Stops
   descending into types, which cannot contain the parameter.  */

static tree
declare_simd_adjust_this (tree *tp, int *walk_subtrees, void *data)
{
  tree this_parm = (tree) data;
  if (TREE_CODE (*tp) == PARM_DECL
      && DECL_NAME (*tp) == this_identifier
      && *tp != this_parm)
    *tp = this_parm;
  else if (TYPE_P (*tp))
    *walk_subtrees = 0;
  return NULL_TREE;
}

/* CTYPE is class type, or null if non-class.
   TYPE is type this FUNCTION_DECL should have, either FUNCTION_TYPE
   or METHOD_TYPE.
   DECLARATOR is the function's name.
   PARMS is a chain of PARM_DECLs for the function.
   VIRTUALP is truthvalue of whether the function is virtual or not.
   FLAGS are to be passed through to `grokclassfn'.
   QUALS are qualifiers indicating whether the function is `const'
   or `volatile'.
RAISES is a list of exceptions that this function can raise. CHECK is 1 if we must find this method in CTYPE, 0 if we should not look, and -1 if we should not call `grokclassfn' at all. SFK is the kind of special function (if any) for the new function. Returns `NULL_TREE' if something goes wrong, after issuing applicable error messages. */ static tree grokfndecl (tree ctype, tree type, tree declarator, tree parms, tree orig_declarator, const cp_decl_specifier_seq *declspecs, tree decl_reqs, int virtualp, enum overload_flags flags, cp_cv_quals quals, cp_ref_qualifier rqual, tree raises, int check, int friendp, int publicp, int inlinep, bool deletedp, special_function_kind sfk, bool funcdef_flag, bool late_return_type_p, int template_count, tree in_namespace, tree* attrlist, location_t location) { tree decl; int staticp = ctype && TREE_CODE (type) == FUNCTION_TYPE; tree t; if (location == UNKNOWN_LOCATION) location = input_location; /* Was the concept specifier present? */ bool concept_p = inlinep & 4; /* Concept declarations must have a corresponding definition. */ if (concept_p && !funcdef_flag) { error_at (location, "concept %qD has no definition", declarator); return NULL_TREE; } type = build_cp_fntype_variant (type, rqual, raises, late_return_type_p); decl = build_lang_decl_loc (location, FUNCTION_DECL, declarator, type); /* Set the constraints on the declaration. */ if (flag_concepts) { tree tmpl_reqs = NULL_TREE; tree ctx = friendp ? current_class_type : ctype; bool block_local = TREE_CODE (current_scope ()) == FUNCTION_DECL; bool memtmpl = (!block_local && (processing_template_decl > template_class_depth (ctx))); if (memtmpl) tmpl_reqs = TEMPLATE_PARMS_CONSTRAINTS (current_template_parms); tree ci = build_constraints (tmpl_reqs, decl_reqs); if (concept_p && ci) { error_at (location, "a function concept cannot be constrained"); ci = NULL_TREE; } /* C++20 CA378: Remove non-templated constrained functions. 
*/ if (ci && (block_local || (!flag_concepts_ts && (!processing_template_decl || (friendp && !memtmpl && !funcdef_flag))))) { error_at (location, "constraints on a non-templated function"); ci = NULL_TREE; } set_constraints (decl, ci); } if (TREE_CODE (type) == METHOD_TYPE) { tree parm = build_this_parm (decl, type, quals); DECL_CHAIN (parm) = parms; parms = parm; /* Allocate space to hold the vptr bit if needed. */ SET_DECL_ALIGN (decl, MINIMUM_METHOD_BOUNDARY); } DECL_ARGUMENTS (decl) = parms; for (t = parms; t; t = DECL_CHAIN (t)) DECL_CONTEXT (t) = decl; /* Propagate volatile out from type to decl. */ if (TYPE_VOLATILE (type)) TREE_THIS_VOLATILE (decl) = 1; /* Setup decl according to sfk. */ switch (sfk) { case sfk_constructor: case sfk_copy_constructor: case sfk_move_constructor: DECL_CXX_CONSTRUCTOR_P (decl) = 1; DECL_NAME (decl) = ctor_identifier; break; case sfk_destructor: DECL_CXX_DESTRUCTOR_P (decl) = 1; DECL_NAME (decl) = dtor_identifier; break; default: break; } if (friendp && TREE_CODE (orig_declarator) == TEMPLATE_ID_EXPR) { if (funcdef_flag) error_at (location, "defining explicit specialization %qD in friend declaration", orig_declarator); else { tree fns = TREE_OPERAND (orig_declarator, 0); tree args = TREE_OPERAND (orig_declarator, 1); if (PROCESSING_REAL_TEMPLATE_DECL_P ()) { /* Something like `template <class T> friend void f<T>()'. */ error_at (location, "invalid use of template-id %qD in declaration " "of primary template", orig_declarator); return NULL_TREE; } /* A friend declaration of the form friend void f<>(). Record the information in the TEMPLATE_ID_EXPR. 
*/ SET_DECL_IMPLICIT_INSTANTIATION (decl); gcc_assert (identifier_p (fns) || OVL_P (fns)); DECL_TEMPLATE_INFO (decl) = build_template_info (fns, args); for (t = TYPE_ARG_TYPES (TREE_TYPE (decl)); t; t = TREE_CHAIN (t)) if (TREE_PURPOSE (t) && TREE_CODE (TREE_PURPOSE (t)) == DEFERRED_PARSE) { error_at (defparse_location (TREE_PURPOSE (t)), "default arguments are not allowed in declaration " "of friend template specialization %qD", decl); return NULL_TREE; } if (inlinep & 1) { error_at (declspecs->locations[ds_inline], "%<inline%> is not allowed in declaration of friend " "template specialization %qD", decl); return NULL_TREE; } } } /* C++17 11.3.6/4: "If a friend declaration specifies a default argument expression, that declaration shall be a definition..." */ if (friendp && !funcdef_flag) { for (tree t = FUNCTION_FIRST_USER_PARMTYPE (decl); t && t != void_list_node; t = TREE_CHAIN (t)) if (TREE_PURPOSE (t)) { permerror (DECL_SOURCE_LOCATION (decl), "friend declaration of %qD specifies default " "arguments and isn%'t a definition", decl); break; } } /* If this decl has namespace scope, set that up. */ if (in_namespace) set_decl_namespace (decl, in_namespace, friendp); else if (ctype) DECL_CONTEXT (decl) = ctype; else DECL_CONTEXT (decl) = FROB_CONTEXT (current_decl_namespace ()); /* `main' and builtins have implicit 'C' linkage. */ if (ctype == NULL_TREE && DECL_FILE_SCOPE_P (decl) && current_lang_name == lang_name_cplusplus && (MAIN_NAME_P (declarator) || (IDENTIFIER_LENGTH (declarator) > 10 && IDENTIFIER_POINTER (declarator)[0] == '_' && IDENTIFIER_POINTER (declarator)[1] == '_' && strncmp (IDENTIFIER_POINTER (declarator)+2, "builtin_", 8) == 0) || (targetcm.cxx_implicit_extern_c && (targetcm.cxx_implicit_extern_c (IDENTIFIER_POINTER (declarator)))))) SET_DECL_LANGUAGE (decl, lang_c); /* Should probably propagate const out from type to decl I bet (mrs). 
*/ if (staticp) { DECL_STATIC_FUNCTION_P (decl) = 1; DECL_CONTEXT (decl) = ctype; } if (deletedp) DECL_DELETED_FN (decl) = 1; if (ctype && funcdef_flag) check_class_member_definition_namespace (decl); if (ctype == NULL_TREE && DECL_MAIN_P (decl)) { if (PROCESSING_REAL_TEMPLATE_DECL_P()) error_at (location, "cannot declare %<::main%> to be a template"); if (inlinep & 1) error_at (declspecs->locations[ds_inline], "cannot declare %<::main%> to be inline"); if (inlinep & 2) error_at (declspecs->locations[ds_constexpr], "cannot declare %<::main%> to be %qs", "constexpr"); if (inlinep & 8) error_at (declspecs->locations[ds_consteval], "cannot declare %<::main%> to be %qs", "consteval"); if (!publicp) error_at (location, "cannot declare %<::main%> to be static"); inlinep = 0; publicp = 1; } /* Members of anonymous types and local classes have no linkage; make them internal. If a typedef is made later, this will be changed. */ if (ctype && (!TREE_PUBLIC (TYPE_MAIN_DECL (ctype)) || decl_function_context (TYPE_MAIN_DECL (ctype)))) publicp = 0; if (publicp && cxx_dialect == cxx98) { /* [basic.link]: A name with no linkage (notably, the name of a class or enumeration declared in a local scope) shall not be used to declare an entity with linkage. DR 757 relaxes this restriction for C++0x. */ no_linkage_error (decl); } TREE_PUBLIC (decl) = publicp; if (! publicp) { DECL_INTERFACE_KNOWN (decl) = 1; DECL_NOT_REALLY_EXTERN (decl) = 1; } /* If the declaration was declared inline, mark it as such. */ if (inlinep) { DECL_DECLARED_INLINE_P (decl) = 1; if (publicp) DECL_COMDAT (decl) = 1; } if (inlinep & 2) DECL_DECLARED_CONSTEXPR_P (decl) = true; else if (inlinep & 8) { DECL_DECLARED_CONSTEXPR_P (decl) = true; SET_DECL_IMMEDIATE_FUNCTION_P (decl); } // If the concept declaration specifier was found, check // that the declaration satisfies the necessary requirements. 
if (concept_p) { DECL_DECLARED_CONCEPT_P (decl) = true; check_concept_fn (decl); } DECL_EXTERNAL (decl) = 1; if (TREE_CODE (type) == FUNCTION_TYPE) { if (quals || rqual) TREE_TYPE (decl) = apply_memfn_quals (TREE_TYPE (decl), TYPE_UNQUALIFIED, REF_QUAL_NONE); if (quals) { error (ctype ? G_("static member function %qD cannot have cv-qualifier") : G_("non-member function %qD cannot have cv-qualifier"), decl); quals = TYPE_UNQUALIFIED; } if (rqual) { error (ctype ? G_("static member function %qD cannot have ref-qualifier") : G_("non-member function %qD cannot have ref-qualifier"), decl); rqual = REF_QUAL_NONE; } } if (deduction_guide_p (decl)) { if (!DECL_NAMESPACE_SCOPE_P (decl)) { error_at (location, "deduction guide %qD must be declared at " "namespace scope", decl); return NULL_TREE; } tree type = TREE_TYPE (DECL_NAME (decl)); if (in_namespace == NULL_TREE && CP_DECL_CONTEXT (decl) != CP_TYPE_CONTEXT (type)) { error_at (location, "deduction guide %qD must be declared in the " "same scope as %qT", decl, type); inform (location_of (type), " declared here"); return NULL_TREE; } if (funcdef_flag) error_at (location, "deduction guide %qD must not have a function body", decl); } else if (IDENTIFIER_ANY_OP_P (DECL_NAME (decl)) && !grok_op_properties (decl, /*complain=*/true)) return NULL_TREE; else if (UDLIT_OPER_P (DECL_NAME (decl))) { bool long_long_unsigned_p; bool long_double_p; const char *suffix = NULL; /* [over.literal]/6: Literal operators shall not have C linkage. 
*/ if (DECL_LANGUAGE (decl) == lang_c) { error_at (location, "literal operator with C linkage"); maybe_show_extern_c_location (); return NULL_TREE; } if (DECL_NAMESPACE_SCOPE_P (decl)) { if (!check_literal_operator_args (decl, &long_long_unsigned_p, &long_double_p)) { error_at (location, "%qD has invalid argument list", decl); return NULL_TREE; } suffix = UDLIT_OP_SUFFIX (DECL_NAME (decl)); if (long_long_unsigned_p) { if (cpp_interpret_int_suffix (parse_in, suffix, strlen (suffix))) warning_at (location, 0, "integer suffix %qs" " shadowed by implementation", suffix); } else if (long_double_p) { if (cpp_interpret_float_suffix (parse_in, suffix, strlen (suffix))) warning_at (location, 0, "floating-point suffix %qs" " shadowed by implementation", suffix); } /* 17.6.3.3.5 */ if (suffix[0] != '_' && !current_function_decl && !(friendp && !funcdef_flag)) warning_at (location, OPT_Wliteral_suffix, "literal operator suffixes not preceded by %<_%>" " are reserved for future standardization"); } else { error_at (location, "%qD must be a non-member function", decl); return NULL_TREE; } } if (funcdef_flag) /* Make the init_value nonzero so pushdecl knows this is not tentative. error_mark_node is replaced later with the BLOCK. */ DECL_INITIAL (decl) = error_mark_node; if (TYPE_NOTHROW_P (type) || nothrow_libfn_p (decl)) TREE_NOTHROW (decl) = 1; if (flag_openmp || flag_openmp_simd) { /* Adjust "omp declare simd" attributes. 
*/ tree ods = lookup_attribute ("omp declare simd", *attrlist); if (ods) { tree attr; for (attr = ods; attr; attr = lookup_attribute ("omp declare simd", TREE_CHAIN (attr))) { if (TREE_CODE (type) == METHOD_TYPE) walk_tree (&TREE_VALUE (attr), declare_simd_adjust_this, DECL_ARGUMENTS (decl), NULL); if (TREE_VALUE (attr) != NULL_TREE) { tree cl = TREE_VALUE (TREE_VALUE (attr)); cl = c_omp_declare_simd_clauses_to_numbers (DECL_ARGUMENTS (decl), cl); if (cl) TREE_VALUE (TREE_VALUE (attr)) = cl; else TREE_VALUE (attr) = NULL_TREE; } } } } /* Caller will do the rest of this. */ if (check < 0) return decl; if (ctype != NULL_TREE) grokclassfn (ctype, decl, flags); /* 12.4/3 */ if (cxx_dialect >= cxx11 && DECL_DESTRUCTOR_P (decl) && !TYPE_BEING_DEFINED (DECL_CONTEXT (decl)) && !processing_template_decl) deduce_noexcept_on_destructor (decl); decl = check_explicit_specialization (orig_declarator, decl, template_count, 2 * funcdef_flag + 4 * (friendp != 0) + 8 * concept_p, *attrlist); if (decl == error_mark_node) return NULL_TREE; if (DECL_STATIC_FUNCTION_P (decl)) check_static_quals (decl, quals); if (attrlist) { cplus_decl_attributes (&decl, *attrlist, 0); *attrlist = NULL_TREE; } /* Check main's type after attributes have been applied. */ if (ctype == NULL_TREE && DECL_MAIN_P (decl)) { if (!same_type_p (TREE_TYPE (TREE_TYPE (decl)), integer_type_node)) { tree oldtypeargs = TYPE_ARG_TYPES (TREE_TYPE (decl)); tree newtype; error_at (declspecs->locations[ds_type_spec], "%<::main%> must return %<int%>"); newtype = build_function_type (integer_type_node, oldtypeargs); TREE_TYPE (decl) = newtype; } if (warn_main) check_main_parameter_types (decl); } if (ctype != NULL_TREE && check) { tree old_decl = check_classfn (ctype, decl, (processing_template_decl > template_class_depth (ctype)) ? 
current_template_parms : NULL_TREE); if (old_decl == error_mark_node) return NULL_TREE; if (old_decl) { tree ok; tree pushed_scope; if (TREE_CODE (old_decl) == TEMPLATE_DECL) /* Because grokfndecl is always supposed to return a FUNCTION_DECL, we pull out the DECL_TEMPLATE_RESULT here. We depend on our callers to figure out that its really a template that's being returned. */ old_decl = DECL_TEMPLATE_RESULT (old_decl); if (DECL_STATIC_FUNCTION_P (old_decl) && TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE) { /* Remove the `this' parm added by grokclassfn. */ revert_static_member_fn (decl); check_static_quals (decl, quals); } if (DECL_ARTIFICIAL (old_decl)) { error ("definition of implicitly-declared %qD", old_decl); return NULL_TREE; } else if (DECL_DEFAULTED_FN (old_decl)) { error ("definition of explicitly-defaulted %q+D", decl); inform (DECL_SOURCE_LOCATION (old_decl), "%q#D explicitly defaulted here", old_decl); return NULL_TREE; } /* Since we've smashed OLD_DECL to its DECL_TEMPLATE_RESULT, we must do the same to DECL. */ if (TREE_CODE (decl) == TEMPLATE_DECL) decl = DECL_TEMPLATE_RESULT (decl); /* Attempt to merge the declarations. This can fail, in the case of some invalid specialization declarations. */ pushed_scope = push_scope (ctype); ok = duplicate_decls (decl, old_decl); if (pushed_scope) pop_scope (pushed_scope); if (!ok) { error ("no %q#D member function declared in class %qT", decl, ctype); return NULL_TREE; } if (ok == error_mark_node) return NULL_TREE; return old_decl; } } if (DECL_CONSTRUCTOR_P (decl) && !grok_ctor_properties (ctype, decl)) return NULL_TREE; if (ctype == NULL_TREE || check) return decl; if (virtualp) DECL_VIRTUAL_P (decl) = 1; return decl; } /* decl is a FUNCTION_DECL. specifiers are the parsed virt-specifiers. Set flags to reflect the virt-specifiers. Returns decl. 
*/

static tree
set_virt_specifiers (tree decl, cp_virt_specifiers specifiers)
{
  if (decl == NULL_TREE)
    return decl;
  if (specifiers & VIRT_SPEC_OVERRIDE)
    DECL_OVERRIDE_P (decl) = 1;
  if (specifiers & VIRT_SPEC_FINAL)
    DECL_FINAL_P (decl) = 1;
  return decl;
}

/* DECL is a VAR_DECL for a static data member.  Set flags to reflect
   the linkage that DECL will receive in the object file.  */

static void
set_linkage_for_static_data_member (tree decl)
{
  /* A static data member always has static storage duration and
     external linkage.  Note that static data members are forbidden in
     local classes -- the only situation in which a class has
     non-external linkage.  */
  TREE_PUBLIC (decl) = 1;
  TREE_STATIC (decl) = 1;
  /* For non-template classes, static data members are always put
     out in exactly those files where they are defined, just as with
     ordinary namespace-scope variables.  */
  if (!processing_template_decl)
    DECL_INTERFACE_KNOWN (decl) = 1;
}

/* Create a VAR_DECL named NAME with the indicated TYPE.

   If SCOPE is non-NULL, it is the class type or namespace containing
   the variable.  If SCOPE is NULL, the variable is created in the
   innermost enclosing scope.  */

static tree
grokvardecl (tree type,
	     tree name,
	     tree orig_declarator,
	     const cp_decl_specifier_seq *declspecs,
	     int initialized,
	     int type_quals,
	     int inlinep,
	     bool conceptp,
	     int template_count,
	     tree scope,
	     location_t location)
{
  tree decl;
  tree explicit_scope;

  gcc_assert (!name || identifier_p (name));

  bool constp = (type_quals & TYPE_QUAL_CONST) != 0;
  bool volatilep = (type_quals & TYPE_QUAL_VOLATILE) != 0;

  /* Compute the scope in which to place the variable, but remember
     whether or not that scope was explicitly specified by the user.  */
  explicit_scope = scope;
  if (!scope)
    {
      /* An explicit "extern" specifier indicates a namespace-scope
	 variable.
*/ if (declspecs->storage_class == sc_extern) scope = current_decl_namespace (); else if (!at_function_scope_p ()) scope = current_scope (); } if (scope && (/* If the variable is a namespace-scope variable declared in a template, we need DECL_LANG_SPECIFIC. */ (TREE_CODE (scope) == NAMESPACE_DECL && processing_template_decl) /* Similarly for namespace-scope variables with language linkage other than C++. */ || (TREE_CODE (scope) == NAMESPACE_DECL && current_lang_name != lang_name_cplusplus) /* Similarly for static data members. */ || TYPE_P (scope) /* Similarly for explicit specializations. */ || (orig_declarator && TREE_CODE (orig_declarator) == TEMPLATE_ID_EXPR))) decl = build_lang_decl_loc (location, VAR_DECL, name, type); else decl = build_decl (location, VAR_DECL, name, type); if (explicit_scope && TREE_CODE (explicit_scope) == NAMESPACE_DECL) set_decl_namespace (decl, explicit_scope, 0); else DECL_CONTEXT (decl) = FROB_CONTEXT (scope); if (declspecs->storage_class == sc_extern) { DECL_THIS_EXTERN (decl) = 1; DECL_EXTERNAL (decl) = !initialized; } if (DECL_CLASS_SCOPE_P (decl)) { set_linkage_for_static_data_member (decl); /* This function is only called with out-of-class definitions. */ DECL_EXTERNAL (decl) = 0; check_class_member_definition_namespace (decl); } /* At top level, either `static' or no s.c. makes a definition (perhaps tentative), and absence of `static' makes it public. */ else if (toplevel_bindings_p ()) { TREE_PUBLIC (decl) = (declspecs->storage_class != sc_static && (DECL_THIS_EXTERN (decl) || ! constp || volatilep || inlinep)); TREE_STATIC (decl) = ! DECL_EXTERNAL (decl); } /* Not at top level, only `static' makes a static definition. 
*/ else { TREE_STATIC (decl) = declspecs->storage_class == sc_static; TREE_PUBLIC (decl) = DECL_EXTERNAL (decl); } if (decl_spec_seq_has_spec_p (declspecs, ds_thread)) { if (DECL_EXTERNAL (decl) || TREE_STATIC (decl)) { CP_DECL_THREAD_LOCAL_P (decl) = true; if (!processing_template_decl) set_decl_tls_model (decl, decl_default_tls_model (decl)); } if (declspecs->gnu_thread_keyword_p) SET_DECL_GNU_TLS_P (decl); } /* If the type of the decl has no linkage, make sure that we'll notice that in mark_used. */ if (cxx_dialect > cxx98 && decl_linkage (decl) != lk_none && DECL_LANG_SPECIFIC (decl) == NULL && !DECL_EXTERN_C_P (decl) && no_linkage_check (TREE_TYPE (decl), /*relaxed_p=*/false)) retrofit_lang_decl (decl); if (TREE_PUBLIC (decl)) { /* [basic.link]: A name with no linkage (notably, the name of a class or enumeration declared in a local scope) shall not be used to declare an entity with linkage. DR 757 relaxes this restriction for C++0x. */ if (cxx_dialect < cxx11) no_linkage_error (decl); } else DECL_INTERFACE_KNOWN (decl) = 1; if (DECL_NAME (decl) && MAIN_NAME_P (DECL_NAME (decl)) && scope == global_namespace) error_at (DECL_SOURCE_LOCATION (decl), "cannot declare %<::main%> to be a global variable"); /* Check that the variable can be safely declared as a concept. Note that this also forbids explicit specializations. 
*/ if (conceptp) { if (!processing_template_decl) { error_at (declspecs->locations[ds_concept], "a non-template variable cannot be %<concept%>"); return NULL_TREE; } else DECL_DECLARED_CONCEPT_P (decl) = true; if (!same_type_ignoring_top_level_qualifiers_p (type, boolean_type_node)) error_at (declspecs->locations[ds_type_spec], "concept must have type %<bool%>"); if (TEMPLATE_PARMS_CONSTRAINTS (current_template_parms)) { error_at (location, "a variable concept cannot be constrained"); TEMPLATE_PARMS_CONSTRAINTS (current_template_parms) = NULL_TREE; } } else if (flag_concepts && processing_template_decl > template_class_depth (scope)) { tree reqs = TEMPLATE_PARMS_CONSTRAINTS (current_template_parms); tree ci = build_constraints (reqs, NULL_TREE); set_constraints (decl, ci); } // Handle explicit specializations and instantiations of variable templates. if (orig_declarator) decl = check_explicit_specialization (orig_declarator, decl, template_count, conceptp * 8); return decl != error_mark_node ? decl : NULL_TREE; } /* Create and return a canonical pointer to member function type, for TYPE, which is a POINTER_TYPE to a METHOD_TYPE. */ tree build_ptrmemfunc_type (tree type) { tree field, fields; tree t; if (type == error_mark_node) return type; /* Make sure that we always have the unqualified pointer-to-member type first. */ if (cp_cv_quals quals = cp_type_quals (type)) { tree unqual = build_ptrmemfunc_type (TYPE_MAIN_VARIANT (type)); return cp_build_qualified_type (unqual, quals); } /* If a canonical type already exists for this type, use it. We use this method instead of type_hash_canon, because it only does a simple equality check on the list of field members. */ t = TYPE_PTRMEMFUNC_TYPE (type); if (t) return t; t = make_node (RECORD_TYPE); /* Let the front end know this is a pointer to member function. 
*/
  TYPE_PTRMEMFUNC_FLAG (t) = 1;

  /* Lay out the two fields of the pointer-to-member-function record:
     the function pointer itself and the `this' adjustment delta.  */
  field = build_decl (input_location, FIELD_DECL, pfn_identifier, type);
  DECL_NONADDRESSABLE_P (field) = 1;
  fields = field;

  field = build_decl (input_location, FIELD_DECL, delta_identifier,
		      delta_type_node);
  DECL_NONADDRESSABLE_P (field) = 1;
  DECL_CHAIN (field) = fields;
  fields = field;

  finish_builtin_struct (t, "__ptrmemfunc_type", fields, ptr_type_node);

  /* Zap out the name so that the back end will give us the debugging
     information for this anonymous RECORD_TYPE.  */
  TYPE_NAME (t) = NULL_TREE;

  /* Cache this pointer-to-member type so that we can find it again
     later.  */
  TYPE_PTRMEMFUNC_TYPE (type) = t;

  if (TYPE_STRUCTURAL_EQUALITY_P (type))
    SET_TYPE_STRUCTURAL_EQUALITY (t);
  else if (TYPE_CANONICAL (type) != type)
    TYPE_CANONICAL (t) = build_ptrmemfunc_type (TYPE_CANONICAL (type));

  return t;
}

/* Create and return a pointer to data member type.  For a METHOD_TYPE
   member, this degenerates to a pointer-to-member-function record;
   otherwise an OFFSET_TYPE is built.  */

tree
build_ptrmem_type (tree class_type, tree member_type)
{
  if (TREE_CODE (member_type) == METHOD_TYPE)
    {
      cp_cv_quals quals = type_memfn_quals (member_type);
      cp_ref_qualifier rqual = type_memfn_rqual (member_type);
      member_type = build_memfn_type (member_type, class_type, quals, rqual);
      return build_ptrmemfunc_type (build_pointer_type (member_type));
    }
  else
    {
      gcc_assert (TREE_CODE (member_type) != FUNCTION_TYPE);
      return build_offset_type (class_type, member_type);
    }
}

/* DECL is a VAR_DECL defined in-class, whose TYPE is also given.
   Check to see that the definition is valid.  Issue appropriate error
   messages.  */

static void
check_static_variable_definition (tree decl, tree type)
{
  /* Avoid redundant diagnostics on out-of-class definitions.  */
  if (!current_class_type || !TYPE_BEING_DEFINED (current_class_type))
    ;
  /* Can't check yet if we don't know the type.  */
  else if (dependent_type_p (type))
    ;
  /* If DECL is declared constexpr, we'll do the appropriate checks
     in check_initializer.  Similarly for inline static data members.
*/ else if (DECL_P (decl) && (DECL_DECLARED_CONSTEXPR_P (decl) || DECL_VAR_DECLARED_INLINE_P (decl))) ; else if (cxx_dialect >= cxx11 && !INTEGRAL_OR_ENUMERATION_TYPE_P (type)) { if (!COMPLETE_TYPE_P (type)) error_at (DECL_SOURCE_LOCATION (decl), "in-class initialization of static data member %q#D of " "incomplete type", decl); else if (literal_type_p (type)) permerror (DECL_SOURCE_LOCATION (decl), "%<constexpr%> needed for in-class initialization of " "static data member %q#D of non-integral type", decl); else error_at (DECL_SOURCE_LOCATION (decl), "in-class initialization of static data member %q#D of " "non-literal type", decl); } /* Motion 10 at San Diego: If a static const integral data member is initialized with an integral constant expression, the initializer may appear either in the declaration (within the class), or in the definition, but not both. If it appears in the class, the member is a member constant. The file-scope definition is always required. */ else if (!ARITHMETIC_TYPE_P (type) && TREE_CODE (type) != ENUMERAL_TYPE) error_at (DECL_SOURCE_LOCATION (decl), "invalid in-class initialization of static data member " "of non-integral type %qT", type); else if (!CP_TYPE_CONST_P (type)) error_at (DECL_SOURCE_LOCATION (decl), "ISO C++ forbids in-class initialization of non-const " "static member %qD", decl); else if (!INTEGRAL_OR_ENUMERATION_TYPE_P (type)) pedwarn (DECL_SOURCE_LOCATION (decl), OPT_Wpedantic, "ISO C++ forbids initialization of member constant " "%qD of non-integral type %qT", decl, type); } /* *expr_p is part of the TYPE_SIZE of a variably-sized array. If any SAVE_EXPRs in *expr_p wrap expressions with side-effects, break those expressions out into temporary variables so that walk_tree doesn't step into them (c++/15764). 
*/

static tree
stabilize_save_expr_r (tree *expr_p, int *walk_subtrees, void *data)
{
  hash_set<tree> *pset = (hash_set<tree> *)data;
  tree expr = *expr_p;
  if (TREE_CODE (expr) == SAVE_EXPR)
    {
      tree op = TREE_OPERAND (expr, 0);
      /* Recurse first so nested SAVE_EXPRs are handled innermost-out.  */
      cp_walk_tree (&op, stabilize_save_expr_r, data, pset);
      if (TREE_SIDE_EFFECTS (op))
	TREE_OPERAND (expr, 0) = get_temp_regvar (TREE_TYPE (op), op);
      *walk_subtrees = 0;
    }
  else if (!EXPR_P (expr) || !TREE_SIDE_EFFECTS (expr))
    *walk_subtrees = 0;
  return NULL;
}

/* Entry point for the above.  */

static void
stabilize_vla_size (tree size)
{
  hash_set<tree> pset;
  /* Break out any function calls into temporary variables.  */
  cp_walk_tree (&size, stabilize_save_expr_r, &pset, &pset);
}

/* Reduce a SIZEOF_EXPR to its value.  Falls back to size_one_node on
   error so callers never see error_mark_node here.  */

tree
fold_sizeof_expr (tree t)
{
  tree r;
  if (SIZEOF_EXPR_TYPE_P (t))
    r = cxx_sizeof_or_alignof_type (EXPR_LOCATION (t),
				    TREE_TYPE (TREE_OPERAND (t, 0)),
				    SIZEOF_EXPR, false, false);
  else if (TYPE_P (TREE_OPERAND (t, 0)))
    r = cxx_sizeof_or_alignof_type (EXPR_LOCATION (t), TREE_OPERAND (t, 0),
				    SIZEOF_EXPR, false, false);
  else
    r = cxx_sizeof_or_alignof_expr (EXPR_LOCATION (t), TREE_OPERAND (t, 0),
				    SIZEOF_EXPR, false);
  if (r == error_mark_node)
    r = size_one_node;
  return r;
}

/* Given the SIZE (i.e., number of elements) in an array, compute
   an appropriate index type for the array.  If non-NULL, NAME is
   the name of the entity being declared.  */

static tree
compute_array_index_type_loc (location_t name_loc, tree name, tree size,
			      tsubst_flags_t complain)
{
  if (error_operand_p (size))
    return error_mark_node;

  /* The type of the index being computed.  */
  tree itype;

  /* The original numeric size as seen in the source code before
     conversion to size_t.  */
  tree origsize = size;

  location_t loc = cp_expr_loc_or_loc (size, name ?
name_loc : input_location); if (!type_dependent_expression_p (size)) { origsize = size = mark_rvalue_use (size); if (cxx_dialect < cxx11 && TREE_CODE (size) == NOP_EXPR && TREE_SIDE_EFFECTS (size)) /* In C++98, we mark a non-constant array bound with a magic NOP_EXPR with TREE_SIDE_EFFECTS; don't fold in that case. */; else { size = build_converted_constant_expr (size_type_node, size, complain); /* Pedantically a constant expression is required here and so __builtin_is_constant_evaluated () should fold to true if it is successfully folded into a constant. */ size = fold_non_dependent_expr (size, complain, /*manifestly_const_eval=*/true); if (!TREE_CONSTANT (size)) size = origsize; } if (error_operand_p (size)) return error_mark_node; /* The array bound must be an integer type. */ tree type = TREE_TYPE (size); if (!INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P (type)) { if (!(complain & tf_error)) return error_mark_node; if (name) error_at (loc, "size of array %qD has non-integral type %qT", name, type); else error_at (loc, "size of array has non-integral type %qT", type); size = integer_one_node; } } /* A type is dependent if it is...an array type constructed from any dependent type or whose size is specified by a constant expression that is value-dependent. */ /* We can only call value_dependent_expression_p on integral constant expressions. */ if (processing_template_decl && potential_constant_expression (size) && value_dependent_expression_p (size)) { /* Just build the index type and mark that it requires structural equality checks. 
*/ in_template: itype = build_index_type (build_min (MINUS_EXPR, sizetype, size, size_one_node)); TYPE_DEPENDENT_P (itype) = 1; TYPE_DEPENDENT_P_VALID (itype) = 1; SET_TYPE_STRUCTURAL_EQUALITY (itype); return itype; } if (TREE_CODE (size) != INTEGER_CST) { tree folded = cp_fully_fold (size); if (TREE_CODE (folded) == INTEGER_CST) { if (name) pedwarn (loc, OPT_Wpedantic, "size of array %qD is not an " "integral constant-expression", name); else pedwarn (loc, OPT_Wpedantic, "size of array is not an integral constant-expression"); } if (TREE_CONSTANT (size) && !TREE_CONSTANT (folded)) /* We might have lost the TREE_CONSTANT flag e.g. when we are folding a conversion from a pointer to integral type. In that case issue an error below and don't treat this as a VLA. */; else /* Use the folded result for VLAs, too; it will have resolved SIZEOF_EXPR. */ size = folded; } /* Normally, the array-bound will be a constant. */ if (TREE_CODE (size) == INTEGER_CST) { /* The size to use in diagnostics that reflects the constant size used in the source, rather than SIZE massaged above. */ tree diagsize = size; /* If the original size before conversion to size_t was signed and negative, convert it to ssizetype to restore the sign. */ if (!TYPE_UNSIGNED (TREE_TYPE (origsize)) && TREE_CODE (size) == INTEGER_CST && tree_int_cst_sign_bit (size)) { diagsize = fold_convert (ssizetype, size); /* Clear the overflow bit that may have been set as a result of the conversion from the sizetype of the new size to ssizetype. */ TREE_OVERFLOW (diagsize) = false; } /* Verify that the array has a positive number of elements and issue the appropriate diagnostic if it doesn't. */ if (!valid_array_size_p (loc, diagsize, name, (complain & tf_error))) { if (!(complain & tf_error)) return error_mark_node; size = integer_one_node; } /* As an extension we allow zero-sized arrays. 
*/ else if (integer_zerop (size)) { if (!(complain & tf_error)) /* We must fail if performing argument deduction (as indicated by the state of complain), so that another substitution can be found. */ return error_mark_node; else if (name) pedwarn (loc, OPT_Wpedantic, "ISO C++ forbids zero-size array %qD", name); else pedwarn (loc, OPT_Wpedantic, "ISO C++ forbids zero-size array"); } } else if (TREE_CONSTANT (size) /* We don't allow VLAs at non-function scopes, or during tentative template substitution. */ || !at_function_scope_p () || !(complain & tf_error)) { if (!(complain & tf_error)) return error_mark_node; /* `(int) &fn' is not a valid array bound. */ if (name) error_at (loc, "size of array %qD is not an integral constant-expression", name); else error_at (loc, "size of array is not an integral constant-expression"); size = integer_one_node; } else if (pedantic && warn_vla != 0) { if (name) pedwarn (name_loc, OPT_Wvla, "ISO C++ forbids variable length array %qD", name); else pedwarn (input_location, OPT_Wvla, "ISO C++ forbids variable length array"); } else if (warn_vla > 0) { if (name) warning_at (name_loc, OPT_Wvla, "variable length array %qD is used", name); else warning (OPT_Wvla, "variable length array is used"); } if (processing_template_decl && !TREE_CONSTANT (size)) goto in_template; else { if (!TREE_CONSTANT (size)) { /* A variable sized array. Arrange for the SAVE_EXPR on the inside of the MINUS_EXPR, which allows the -1 to get folded with the +1 that happens when building TYPE_SIZE. */ size = variable_size (size); stabilize_vla_size (size); } /* Compute the index of the largest element in the array. It is one less than the number of elements in the array. We save and restore PROCESSING_TEMPLATE_DECL so that computations in cp_build_binary_op will be appropriately folded. 
*/ { processing_template_decl_sentinel s; itype = cp_build_binary_op (input_location, MINUS_EXPR, cp_convert (ssizetype, size, complain), cp_convert (ssizetype, integer_one_node, complain), complain); itype = maybe_constant_value (itype); } if (!TREE_CONSTANT (itype)) { if (sanitize_flags_p (SANITIZE_VLA) && current_function_decl != NULL_TREE) { /* We have to add 1 -- in the ubsan routine we generate LE_EXPR rather than LT_EXPR. */ tree t = fold_build2 (PLUS_EXPR, TREE_TYPE (itype), itype, build_one_cst (TREE_TYPE (itype))); t = ubsan_instrument_vla (input_location, t); finish_expr_stmt (t); } } /* Make sure that there was no overflow when creating to a signed index type. (For example, on a 32-bit machine, an array with size 2^32 - 1 is too big.) */ else if (TREE_CODE (itype) == INTEGER_CST && TREE_OVERFLOW (itype)) { if (!(complain & tf_error)) return error_mark_node; error ("overflow in array dimension"); TREE_OVERFLOW (itype) = 0; } } /* Create and return the appropriate index type. */ itype = build_index_type (itype); /* If the index type were dependent, we would have returned early, so remember that it isn't. */ TYPE_DEPENDENT_P (itype) = 0; TYPE_DEPENDENT_P_VALID (itype) = 1; return itype; } tree compute_array_index_type (tree name, tree size, tsubst_flags_t complain) { return compute_array_index_type_loc (input_location, name, size, complain); } /* Returns the scope (if any) in which the entity declared by DECLARATOR will be located. If the entity was declared with an unqualified name, NULL_TREE is returned. */ tree get_scope_of_declarator (const cp_declarator *declarator) { while (declarator && declarator->kind != cdk_id) declarator = declarator->declarator; /* If the declarator-id is a SCOPE_REF, the scope in which the declaration occurs is the first operand. 
*/
  if (declarator
      && declarator->u.id.qualifying_scope)
    return declarator->u.id.qualifying_scope;

  /* Otherwise, the declarator is not a qualified name; the entity will
     be declared in the current scope.  */
  return NULL_TREE;
}

/* Returns an ARRAY_TYPE for an array with SIZE elements of the
   indicated TYPE.  If non-NULL, NAME is the NAME of the declaration
   with this type.  LOC is the location to use in diagnostics.  */

static tree
create_array_type_for_decl (tree name, tree type, tree size, location_t loc)
{
  tree itype = NULL_TREE;

  /* If things have already gone awry, bail now.  */
  if (type == error_mark_node || size == error_mark_node)
    return error_mark_node;

  /* 8.3.4/1: If the type of the identifier of D contains the auto
     type-specifier, the program is ill-formed.  */
  if (type_uses_auto (type))
    {
      if (name)
	error_at (loc, "%qD declared as array of %qT", name, type);
      else
	error ("creating array of %qT", type);
      return error_mark_node;
    }

  /* If there are some types which cannot be array elements,
     issue an error-message and return.  Each diagnostic comes in a
     named (declaration) and an unnamed (e.g. cast/typename) flavor.  */
  switch (TREE_CODE (type))
    {
    case VOID_TYPE:
      if (name)
        error_at (loc, "declaration of %qD as array of void", name);
      else
        error ("creating array of void");
      return error_mark_node;

    case FUNCTION_TYPE:
      if (name)
        error_at (loc, "declaration of %qD as array of functions", name);
      else
        error ("creating array of functions");
      return error_mark_node;

    case REFERENCE_TYPE:
      if (name)
        error_at (loc, "declaration of %qD as array of references", name);
      else
        error ("creating array of references");
      return error_mark_node;

    case METHOD_TYPE:
      if (name)
        error_at (loc, "declaration of %qD as array of function members",
		  name);
      else
        error ("creating array of function members");
      return error_mark_node;

    default:
      break;
    }

  if (!verify_type_context (name ? loc : input_location,
			    TCTX_ARRAY_ELEMENT, type))
    return error_mark_node;

  /* [dcl.array]

     The constant expressions that specify the bounds of the arrays
     can be omitted only for the first member of the sequence.
*/
  if (TREE_CODE (type) == ARRAY_TYPE && !TYPE_DOMAIN (type))
    {
      if (name)
	error_at (loc, "declaration of %qD as multidimensional array must "
		  "have bounds for all dimensions except the first",
		  name);
      else
	error ("multidimensional array must have bounds for all "
	       "dimensions except the first");

      return error_mark_node;
    }

  /* Figure out the index type for the array.  ITYPE stays NULL_TREE
     when no SIZE was given (array of unknown bound).  */
  if (size)
    itype = compute_array_index_type_loc (loc, name, size,
					  tf_warning_or_error);

  /* [dcl.array]
     T is called the array element type; this type shall not be [...] an
     abstract class type.  */
  abstract_virtuals_error (name, type);

  return build_cplus_array_type (type, itype);
}

/* Returns the smallest location that is not UNKNOWN_LOCATION.
   Either argument may be UNKNOWN_LOCATION, which is treated as
   "absent".  */

static location_t
min_location (location_t loca, location_t locb)
{
  if (loca == UNKNOWN_LOCATION
      || (locb != UNKNOWN_LOCATION
	  && linemap_location_before_p (line_table, locb, loca)))
    return locb;
  return loca;
}

/* Returns the smallest location != UNKNOWN_LOCATION among the
   three stored in LOCATIONS[ds_const], LOCATIONS[ds_volatile],
   and LOCATIONS[ds_restrict].  Only the qualifiers actually present
   in TYPE_QUALS are considered.  */

static location_t
smallest_type_quals_location (int type_quals, const location_t* locations)
{
  location_t loc = UNKNOWN_LOCATION;

  if (type_quals & TYPE_QUAL_CONST)
    loc = locations[ds_const];

  if (type_quals & TYPE_QUAL_VOLATILE)
    loc = min_location (loc, locations[ds_volatile]);

  if (type_quals & TYPE_QUAL_RESTRICT)
    loc = min_location (loc, locations[ds_restrict]);

  return loc;
}

/* Returns the smallest among the latter and locations[ds_type_spec].
*/
static location_t
smallest_type_location (int type_quals, const location_t* locations)
{
  location_t loc = smallest_type_quals_location (type_quals, locations);
  return min_location (loc, locations[ds_type_spec]);
}

/* As above, but taking the qualifiers and locations directly from
   DECLSPECS.  */

static location_t
smallest_type_location (const cp_decl_specifier_seq *declspecs)
{
  int type_quals = get_type_quals (declspecs);
  return smallest_type_location (type_quals, declspecs->locations);
}

/* Check that it's OK to declare a function with the indicated TYPE
   and TYPE_QUALS.  SFK indicates the kind of special function (if any)
   that this function is.  OPTYPE is the type given in a conversion
   operator declaration, or the class type for a constructor/destructor.
   Returns the actual return type of the function; that may be different
   than TYPE if an error occurs, or for certain special functions.  */

static tree
check_special_function_return_type (special_function_kind sfk,
				    tree type,
				    tree optype,
				    int type_quals,
				    const location_t* locations)
{
  switch (sfk)
    {
    case sfk_constructor:
      /* Constructors may declare neither a return type nor return-type
	 qualifiers.  */
      if (type)
	error_at (smallest_type_location (type_quals, locations),
		  "return type specification for constructor invalid");
      else if (type_quals != TYPE_UNQUALIFIED)
	error_at (smallest_type_quals_location (type_quals, locations),
		  "qualifiers are not allowed on constructor declaration");

      if (targetm.cxx.cdtor_returns_this ())
	type = build_pointer_type (optype);
      else
	type = void_type_node;
      break;

    case sfk_destructor:
      if (type)
	error_at (smallest_type_location (type_quals, locations),
		  "return type specification for destructor invalid");
      else if (type_quals != TYPE_UNQUALIFIED)
	error_at (smallest_type_quals_location (type_quals, locations),
		  "qualifiers are not allowed on destructor declaration");

      /* We can't use the proper return type here because we run into
	 problems with ambiguous bases and covariant returns.
*/
      if (targetm.cxx.cdtor_returns_this ())
	type = build_pointer_type (void_type_node);
      else
	type = void_type_node;
      break;

    case sfk_conversion:
      if (type)
	error_at (smallest_type_location (type_quals, locations),
		  "return type specified for %<operator %T%>", optype);
      else if (type_quals != TYPE_UNQUALIFIED)
	error_at (smallest_type_quals_location (type_quals, locations),
		  "qualifiers are not allowed on declaration of "
		  "%<operator %T%>", optype);

      /* A conversion operator's return type is the type converted
	 to, i.e. OPTYPE.  */
      type = optype;
      break;

    case sfk_deduction_guide:
      if (type)
	error_at (smallest_type_location (type_quals, locations),
		  "return type specified for deduction guide");
      else if (type_quals != TYPE_UNQUALIFIED)
	error_at (smallest_type_quals_location (type_quals, locations),
		  "qualifiers are not allowed on declaration of "
		  "deduction guide");
      if (TREE_CODE (optype) == TEMPLATE_TEMPLATE_PARM)
	{
	  error ("template template parameter %qT in declaration of "
		 "deduction guide", optype);
	  type = error_mark_node;
	}
      else
	type = make_template_placeholder (CLASSTYPE_TI_TEMPLATE (optype));
      /* A deduction guide admits no decl-specifiers other than
	 'explicit'.  */
      for (int i = 0; i < ds_last; ++i)
	if (i != ds_explicit && locations[i])
	  error_at (locations[i],
		    "%<decl-specifier%> in declaration of deduction guide");
      break;

    default:
      gcc_unreachable ();
    }

  return type;
}

/* A variable or data member (whose unqualified name is IDENTIFIER)
   has been declared with the indicated TYPE.  If the TYPE is not
   acceptable, issue an error message and return a type to use for
   error-recovery purposes.  */

tree
check_var_type (tree identifier, tree type, location_t loc)
{
  if (VOID_TYPE_P (type))
    {
      if (!identifier)
	error_at (loc, "unnamed variable or field declared void");
      else if (identifier_p (identifier))
	{
	  gcc_assert (!IDENTIFIER_ANY_OP_P (identifier));
	  error_at (loc, "variable or field %qE declared void",
		    identifier);
	}
      else
	error_at (loc, "variable or field declared void");
      /* Recover by treating the declaration as erroneous.  */
      type = error_mark_node;
    }

  return type;
}

/* Handle declaring DECL as an inline variable.
*/
static void
mark_inline_variable (tree decl, location_t loc)
{
  /* Inline variables are valid only at namespace scope, and only
     since C++17; LOC is used for the diagnostics below.  */
  bool inlinep = true;
  if (! toplevel_bindings_p ())
    {
      error_at (loc, "%<inline%> specifier invalid for variable "
		"%qD declared at block scope", decl);
      inlinep = false;
    }
  else if (cxx_dialect < cxx17)
    pedwarn (loc, 0, "inline variables are only available "
	     "with %<-std=c++17%> or %<-std=gnu++17%>");
  if (inlinep)
    {
      retrofit_lang_decl (decl);
      SET_DECL_VAR_DECLARED_INLINE_P (decl);
    }
}


/* Assign a typedef-given name to a class or enumeration type declared
   as anonymous at first.  This was split out of grokdeclarator
   because it is also used in libcc1.  */

void
name_unnamed_type (tree type, tree decl)
{
  gcc_assert (TYPE_UNNAMED_P (type));

  /* Replace the anonymous name with the real name everywhere.  */
  for (tree t = TYPE_MAIN_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
    if (IDENTIFIER_ANON_P (TYPE_IDENTIFIER (t)))
      /* We do not rename the debug info representing the unnamed
	 tagged type because the standard says in [dcl.typedef] that
	 the naming applies only for linkage purposes.  */
      /*debug_hooks->set_name (t, decl);*/
      TYPE_NAME (t) = decl;

  /* If this is a typedef within a template class, the nested
     type is a (non-primary) template.  The name for the
     template needs updating as well.  */
  if (TYPE_LANG_SPECIFIC (type) && CLASSTYPE_TEMPLATE_INFO (type))
    DECL_NAME (CLASSTYPE_TI_TEMPLATE (type))
      = TYPE_IDENTIFIER (type);

  /* Adjust linkage now that we aren't unnamed anymore.  */
  reset_type_linkage (type);

  /* FIXME remangle member functions; member functions of a
     type with external linkage have external linkage.  */

  /* Check that our job is done, and that it would fail if we
     attempted to do it again.  */
  gcc_assert (!TYPE_UNNAMED_P (type));
}

/* Given declspecs and a declarator (abstract or otherwise), determine
   the name and type of the object declared and construct a DECL node
   for it.

   DECLSPECS points to the representation of declaration-specifier
   sequence that precedes declarator.
DECL_CONTEXT says which syntactic context this declaration is in: NORMAL for most contexts. Make a VAR_DECL or FUNCTION_DECL or TYPE_DECL. FUNCDEF for a function definition. Like NORMAL but a few different error messages in each case. Return value may be zero meaning this definition is too screwy to try to parse. MEMFUNCDEF for a function definition. Like FUNCDEF but prepares to handle member functions (which have FIELD context). Return value may be zero meaning this definition is too screwy to try to parse. PARM for a parameter declaration (either within a function prototype or before a function body). Make a PARM_DECL, or return void_type_node. TPARM for a template parameter declaration. CATCHPARM for a parameter declaration before a catch clause. TYPENAME if for a typename (in a cast or sizeof). Don't make a DECL node; just return the ..._TYPE node. FIELD for a struct or union field; make a FIELD_DECL. BITFIELD for a field with specified width. INITIALIZED is as for start_decl. ATTRLIST is a pointer to the list of attributes, which may be NULL if there are none; *ATTRLIST may be modified if attributes from inside the declarator should be applied to the declaration. When this function is called, scoping variables (such as CURRENT_CLASS_TYPE) should reflect the scope in which the declaration occurs, not the scope in which the new declaration will be placed. For example, on: void S::f() { ... } when grokdeclarator is called for `S::f', the CURRENT_CLASS_TYPE should not be `S'. Returns a DECL (if a declarator is present), a TYPE (if there is no declarator, in cases like "struct S;"), or the ERROR_MARK_NODE if an error occurs. 
*/ tree grokdeclarator (const cp_declarator *declarator, cp_decl_specifier_seq *declspecs, enum decl_context decl_context, int initialized, tree* attrlist) { tree type = NULL_TREE; int longlong = 0; int explicit_intN = 0; int int_n_alt = 0; int virtualp, explicitp, friendp, inlinep, staticp; int explicit_int = 0; int explicit_char = 0; int defaulted_int = 0; tree typedef_decl = NULL_TREE; const char *name = NULL; tree typedef_type = NULL_TREE; /* True if this declarator is a function definition. */ bool funcdef_flag = false; cp_declarator_kind innermost_code = cdk_error; int bitfield = 0; #if 0 /* See the code below that used this. */ tree decl_attr = NULL_TREE; #endif /* Keep track of what sort of function is being processed so that we can warn about default return values, or explicit return values which do not match prescribed defaults. */ special_function_kind sfk = sfk_none; tree dname = NULL_TREE; tree ctor_return_type = NULL_TREE; enum overload_flags flags = NO_SPECIAL; /* cv-qualifiers that apply to the declarator, for a declaration of a member function. */ cp_cv_quals memfn_quals = TYPE_UNQUALIFIED; /* virt-specifiers that apply to the declarator, for a declaration of a member function. */ cp_virt_specifiers virt_specifiers = VIRT_SPEC_UNSPECIFIED; /* ref-qualifier that applies to the declarator, for a declaration of a member function. */ cp_ref_qualifier rqual = REF_QUAL_NONE; /* cv-qualifiers that apply to the type specified by the DECLSPECS. */ int type_quals = get_type_quals (declspecs); tree raises = NULL_TREE; int template_count = 0; tree returned_attrs = NULL_TREE; tree parms = NULL_TREE; const cp_declarator *id_declarator; /* The unqualified name of the declarator; either an IDENTIFIER_NODE, BIT_NOT_EXPR, or TEMPLATE_ID_EXPR. */ tree unqualified_id; /* The class type, if any, in which this entity is located, or NULL_TREE if none. 
Note that this value may be different from the current class type; for example if an attempt is made to declare "A::f" inside "B", this value will be "A". */ tree ctype = current_class_type; /* The NAMESPACE_DECL for the namespace in which this entity is located. If an unqualified name is used to declare the entity, this value will be NULL_TREE, even if the entity is located at namespace scope. */ tree in_namespace = NULL_TREE; cp_storage_class storage_class; bool unsigned_p, signed_p, short_p, long_p, thread_p; bool type_was_error_mark_node = false; bool parameter_pack_p = declarator ? declarator->parameter_pack_p : false; bool template_type_arg = false; bool template_parm_flag = false; bool typedef_p = decl_spec_seq_has_spec_p (declspecs, ds_typedef); bool constexpr_p = decl_spec_seq_has_spec_p (declspecs, ds_constexpr); bool constinit_p = decl_spec_seq_has_spec_p (declspecs, ds_constinit); bool consteval_p = decl_spec_seq_has_spec_p (declspecs, ds_consteval); bool late_return_type_p = false; bool array_parameter_p = false; tree reqs = NULL_TREE; signed_p = decl_spec_seq_has_spec_p (declspecs, ds_signed); unsigned_p = decl_spec_seq_has_spec_p (declspecs, ds_unsigned); short_p = decl_spec_seq_has_spec_p (declspecs, ds_short); long_p = decl_spec_seq_has_spec_p (declspecs, ds_long); longlong = decl_spec_seq_has_spec_p (declspecs, ds_long_long); explicit_intN = declspecs->explicit_intN_p; int_n_alt = declspecs->int_n_alt; thread_p = decl_spec_seq_has_spec_p (declspecs, ds_thread); // Was concept_p specified? Note that ds_concept // implies ds_constexpr! 
bool concept_p = decl_spec_seq_has_spec_p (declspecs, ds_concept); if (concept_p) constexpr_p = true; if (decl_context == FUNCDEF) funcdef_flag = true, decl_context = NORMAL; else if (decl_context == MEMFUNCDEF) funcdef_flag = true, decl_context = FIELD; else if (decl_context == BITFIELD) bitfield = 1, decl_context = FIELD; else if (decl_context == TEMPLATE_TYPE_ARG) template_type_arg = true, decl_context = TYPENAME; else if (decl_context == TPARM) template_parm_flag = true, decl_context = PARM; if (initialized == SD_DEFAULTED || initialized == SD_DELETED) funcdef_flag = true; location_t typespec_loc = smallest_type_location (type_quals, declspecs->locations); if (typespec_loc == UNKNOWN_LOCATION) typespec_loc = input_location; location_t id_loc = declarator ? declarator->id_loc : input_location; if (id_loc == UNKNOWN_LOCATION) id_loc = input_location; /* Look inside a declarator for the name being declared and get it as a string, for an error message. */ for (id_declarator = declarator; id_declarator; id_declarator = id_declarator->declarator) { if (id_declarator->kind != cdk_id) innermost_code = id_declarator->kind; switch (id_declarator->kind) { case cdk_function: if (id_declarator->declarator && id_declarator->declarator->kind == cdk_id) { sfk = id_declarator->declarator->u.id.sfk; if (sfk == sfk_destructor) flags = DTOR_FLAG; } break; case cdk_id: { tree qualifying_scope = id_declarator->u.id.qualifying_scope; tree decl = id_declarator->u.id.unqualified_name; if (!decl) break; if (qualifying_scope) { if (check_for_bare_parameter_packs (qualifying_scope, id_declarator->id_loc)) return error_mark_node; if (at_function_scope_p ()) { /* [dcl.meaning] A declarator-id shall not be qualified except for ... None of the cases are permitted in block scope. 
*/ if (qualifying_scope == global_namespace) error ("invalid use of qualified-name %<::%D%>", decl); else if (TYPE_P (qualifying_scope)) error ("invalid use of qualified-name %<%T::%D%>", qualifying_scope, decl); else error ("invalid use of qualified-name %<%D::%D%>", qualifying_scope, decl); return error_mark_node; } else if (TYPE_P (qualifying_scope)) { ctype = qualifying_scope; if (!MAYBE_CLASS_TYPE_P (ctype)) { error_at (id_declarator->id_loc, "%q#T is not a class or namespace", ctype); ctype = NULL_TREE; } else if (innermost_code != cdk_function && current_class_type && !uniquely_derived_from_p (ctype, current_class_type)) { error_at (id_declarator->id_loc, "invalid use of qualified-name %<%T::%D%>", qualifying_scope, decl); return error_mark_node; } } else if (TREE_CODE (qualifying_scope) == NAMESPACE_DECL) in_namespace = qualifying_scope; } switch (TREE_CODE (decl)) { case BIT_NOT_EXPR: { if (innermost_code != cdk_function) { error_at (EXPR_LOCATION (decl), "declaration of %qE as non-function", decl); return error_mark_node; } else if (!qualifying_scope && !(current_class_type && at_class_scope_p ())) { error_at (EXPR_LOCATION (decl), "declaration of %qE as non-member", decl); return error_mark_node; } tree type = TREE_OPERAND (decl, 0); if (TYPE_P (type)) type = constructor_name (type); name = identifier_to_locale (IDENTIFIER_POINTER (type)); dname = decl; } break; case TEMPLATE_ID_EXPR: { tree fns = TREE_OPERAND (decl, 0); dname = fns; if (!identifier_p (dname)) dname = OVL_NAME (dname); } /* Fall through. 
*/ case IDENTIFIER_NODE: if (identifier_p (decl)) dname = decl; if (IDENTIFIER_KEYWORD_P (dname)) { error ("declarator-id missing; using reserved word %qD", dname); name = identifier_to_locale (IDENTIFIER_POINTER (dname)); } else if (!IDENTIFIER_CONV_OP_P (dname)) name = identifier_to_locale (IDENTIFIER_POINTER (dname)); else { gcc_assert (flags == NO_SPECIAL); flags = TYPENAME_FLAG; sfk = sfk_conversion; tree glob = get_global_binding (dname); if (glob && TREE_CODE (glob) == TYPE_DECL) name = identifier_to_locale (IDENTIFIER_POINTER (dname)); else name = "<invalid operator>"; } break; default: gcc_unreachable (); } break; } case cdk_array: case cdk_pointer: case cdk_reference: case cdk_ptrmem: break; case cdk_decomp: name = "structured binding"; break; case cdk_error: return error_mark_node; default: gcc_unreachable (); } if (id_declarator->kind == cdk_id) break; } /* [dcl.fct.edf] The declarator in a function-definition shall have the form D1 ( parameter-declaration-clause) ... */ if (funcdef_flag && innermost_code != cdk_function) { error_at (id_loc, "function definition does not declare parameters"); return error_mark_node; } if (flags == TYPENAME_FLAG && innermost_code != cdk_function && ! (ctype && !declspecs->any_specifiers_p)) { error_at (id_loc, "declaration of %qD as non-function", dname); return error_mark_node; } if (dname && identifier_p (dname)) { if (UDLIT_OPER_P (dname) && innermost_code != cdk_function) { error_at (id_loc, "declaration of %qD as non-function", dname); return error_mark_node; } if (IDENTIFIER_ANY_OP_P (dname)) { if (typedef_p) { error_at (id_loc, "declaration of %qD as %<typedef%>", dname); return error_mark_node; } else if (decl_context == PARM || decl_context == CATCHPARM) { error_at (id_loc, "declaration of %qD as parameter", dname); return error_mark_node; } } } /* Anything declared one level down from the top level must be one of the parameters of a function (because the body is at least two levels down). 
*/ /* This heuristic cannot be applied to C++ nodes! Fixed, however, by not allowing C++ class definitions to specify their parameters with xdecls (must be spec.d in the parmlist). Since we now wait to push a class scope until we are sure that we are in a legitimate method context, we must set oldcname explicitly (since current_class_name is not yet alive). We also want to avoid calling this a PARM if it is in a namespace. */ if (decl_context == NORMAL && !toplevel_bindings_p ()) { cp_binding_level *b = current_binding_level; current_binding_level = b->level_chain; if (current_binding_level != 0 && toplevel_bindings_p ()) decl_context = PARM; current_binding_level = b; } if (name == NULL) name = decl_context == PARM ? "parameter" : "type name"; if (consteval_p && constexpr_p) { error_at (declspecs->locations[ds_consteval], "both %qs and %qs specified", "constexpr", "consteval"); return error_mark_node; } if (concept_p && typedef_p) { error_at (declspecs->locations[ds_concept], "%qs cannot appear in a typedef declaration", "concept"); return error_mark_node; } if (constexpr_p && typedef_p) { error_at (declspecs->locations[ds_constexpr], "%qs cannot appear in a typedef declaration", "constexpr"); return error_mark_node; } if (consteval_p && typedef_p) { error_at (declspecs->locations[ds_consteval], "%qs cannot appear in a typedef declaration", "consteval"); return error_mark_node; } if (constinit_p && typedef_p) { error_at (declspecs->locations[ds_constinit], "%qs cannot appear in a typedef declaration", "constinit"); return error_mark_node; } /* [dcl.spec]/2 "At most one of the constexpr, consteval, and constinit keywords shall appear in a decl-specifier-seq." 
*/ if (constinit_p && constexpr_p) { gcc_rich_location richloc (declspecs->locations[ds_constinit]); richloc.add_range (declspecs->locations[ds_constexpr]); error_at (&richloc, "can use at most one of the %<constinit%> and %<constexpr%> " "specifiers"); return error_mark_node; } /* If there were multiple types specified in the decl-specifier-seq, issue an error message. */ if (declspecs->multiple_types_p) { error_at (typespec_loc, "two or more data types in declaration of %qs", name); return error_mark_node; } if (declspecs->conflicting_specifiers_p) { error_at (min_location (declspecs->locations[ds_typedef], declspecs->locations[ds_storage_class]), "conflicting specifiers in declaration of %qs", name); return error_mark_node; } /* Extract the basic type from the decl-specifier-seq. */ type = declspecs->type; if (type == error_mark_node) { type = NULL_TREE; type_was_error_mark_node = true; } /* Ignore erroneous attributes. */ if (attrlist && *attrlist == error_mark_node) *attrlist = NULL_TREE; /* An object declared as __attribute__((deprecated)) suppresses warnings of uses of other deprecated items. */ temp_override<deprecated_states> ds (deprecated_state); if (attrlist && lookup_attribute ("deprecated", *attrlist)) deprecated_state = DEPRECATED_SUPPRESS; cp_warn_deprecated_use (type); if (type && TREE_CODE (type) == TYPE_DECL) { cp_warn_deprecated_use_scopes (CP_DECL_CONTEXT (type)); typedef_decl = type; type = TREE_TYPE (typedef_decl); if (DECL_ARTIFICIAL (typedef_decl)) cp_warn_deprecated_use (type); } /* No type at all: default to `int', and set DEFAULTED_INT because it was not a user-defined typedef. */ if (type == NULL_TREE) { if (signed_p || unsigned_p || long_p || short_p) { /* These imply 'int'. */ type = integer_type_node; defaulted_int = 1; } /* If we just have "complex", it is equivalent to "complex double". 
*/ else if (!longlong && !explicit_intN && decl_spec_seq_has_spec_p (declspecs, ds_complex)) { type = double_type_node; pedwarn (declspecs->locations[ds_complex], OPT_Wpedantic, "ISO C++ does not support plain %<complex%> meaning " "%<double complex%>"); } } /* Gather flags. */ explicit_int = declspecs->explicit_int_p; explicit_char = declspecs->explicit_char_p; #if 0 /* See the code below that used this. */ if (typedef_decl) decl_attr = DECL_ATTRIBUTES (typedef_decl); #endif typedef_type = type; if (sfk == sfk_conversion || sfk == sfk_deduction_guide) ctor_return_type = TREE_TYPE (dname); else ctor_return_type = ctype; if (sfk != sfk_none) { type = check_special_function_return_type (sfk, type, ctor_return_type, type_quals, declspecs->locations); type_quals = TYPE_UNQUALIFIED; } else if (type == NULL_TREE) { int is_main; explicit_int = -1; /* We handle `main' specially here, because 'main () { }' is so common. With no options, it is allowed. With -Wreturn-type, it is a warning. It is only an error with -pedantic-errors. */ is_main = (funcdef_flag && dname && identifier_p (dname) && MAIN_NAME_P (dname) && ctype == NULL_TREE && in_namespace == NULL_TREE && current_namespace == global_namespace); if (type_was_error_mark_node) /* We've already issued an error, don't complain more. */; else if (in_system_header_at (id_loc) || flag_ms_extensions) /* Allow it, sigh. */; else if (! is_main) permerror (id_loc, "ISO C++ forbids declaration of %qs with no type", name); else if (pedantic) pedwarn (id_loc, OPT_Wpedantic, "ISO C++ forbids declaration of %qs with no type", name); else warning_at (id_loc, OPT_Wreturn_type, "ISO C++ forbids declaration of %qs with no type", name); if (type_was_error_mark_node && template_parm_flag) /* FIXME we should be able to propagate the error_mark_node as is for other contexts too. */ type = error_mark_node; else type = integer_type_node; } ctype = NULL_TREE; if (explicit_intN) { if (! 
int_n_enabled_p[declspecs->int_n_idx]) { error_at (declspecs->locations[ds_type_spec], "%<__int%d%> is not supported by this target", int_n_data[declspecs->int_n_idx].bitsize); explicit_intN = false; } /* Don't pedwarn if the alternate "__intN__" form has been used instead of "__intN". */ else if (!int_n_alt && pedantic) pedwarn (declspecs->locations[ds_type_spec], OPT_Wpedantic, "ISO C++ does not support %<__int%d%> for %qs", int_n_data[declspecs->int_n_idx].bitsize, name); } /* Now process the modifiers that were specified and check for invalid combinations. */ /* Long double is a special combination. */ if (long_p && !longlong && TYPE_MAIN_VARIANT (type) == double_type_node) { long_p = false; type = cp_build_qualified_type (long_double_type_node, cp_type_quals (type)); } /* Check all other uses of type modifiers. */ if (unsigned_p || signed_p || long_p || short_p) { location_t loc; const char *key; if (unsigned_p) { key = "unsigned"; loc = declspecs->locations[ds_unsigned]; } else if (signed_p) { key = "signed"; loc = declspecs->locations[ds_signed]; } else if (longlong) { key = "long long"; loc = declspecs->locations[ds_long_long]; } else if (long_p) { key = "long"; loc = declspecs->locations[ds_long]; } else /* if (short_p) */ { key = "short"; loc = declspecs->locations[ds_short]; } int ok = 0; if (signed_p && unsigned_p) { gcc_rich_location richloc (declspecs->locations[ds_signed]); richloc.add_range (declspecs->locations[ds_unsigned]); error_at (&richloc, "%<signed%> and %<unsigned%> specified together"); } else if (long_p && short_p) { gcc_rich_location richloc (declspecs->locations[ds_long]); richloc.add_range (declspecs->locations[ds_short]); error_at (&richloc, "%<long%> and %<short%> specified together"); } else if (TREE_CODE (type) != INTEGER_TYPE || type == char8_type_node || type == char16_type_node || type == char32_type_node || ((long_p || short_p) && (explicit_char || explicit_intN))) error_at (loc, "%qs specified with %qT", key, type); else if 
(!explicit_int && !defaulted_int && !explicit_char && !explicit_intN) { if (typedef_decl) { pedwarn (loc, OPT_Wpedantic, "%qs specified with %qT", key, type); ok = !flag_pedantic_errors; } else if (declspecs->decltype_p) error_at (loc, "%qs specified with %<decltype%>", key); else error_at (loc, "%qs specified with %<typeof%>", key); } else ok = 1; /* Discard the type modifiers if they are invalid. */ if (! ok) { unsigned_p = false; signed_p = false; long_p = false; short_p = false; longlong = 0; } } /* Decide whether an integer type is signed or not. Optionally treat bitfields as signed by default. */ if (unsigned_p /* [class.bit] It is implementation-defined whether a plain (neither explicitly signed or unsigned) char, short, int, or long bit-field is signed or unsigned. Naturally, we extend this to long long as well. Note that this does not include wchar_t. */ || (bitfield && !flag_signed_bitfields && !signed_p /* A typedef for plain `int' without `signed' can be controlled just like plain `int', but a typedef for `signed int' cannot be so controlled. 
*/ && !(typedef_decl && C_TYPEDEF_EXPLICITLY_SIGNED (typedef_decl)) && TREE_CODE (type) == INTEGER_TYPE && !same_type_p (TYPE_MAIN_VARIANT (type), wchar_type_node))) { if (explicit_intN) type = int_n_trees[declspecs->int_n_idx].unsigned_type; else if (longlong) type = long_long_unsigned_type_node; else if (long_p) type = long_unsigned_type_node; else if (short_p) type = short_unsigned_type_node; else if (type == char_type_node) type = unsigned_char_type_node; else if (typedef_decl) type = unsigned_type_for (type); else type = unsigned_type_node; } else if (signed_p && type == char_type_node) type = signed_char_type_node; else if (explicit_intN) type = int_n_trees[declspecs->int_n_idx].signed_type; else if (longlong) type = long_long_integer_type_node; else if (long_p) type = long_integer_type_node; else if (short_p) type = short_integer_type_node; if (decl_spec_seq_has_spec_p (declspecs, ds_complex)) { if (TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE) error_at (declspecs->locations[ds_complex], "complex invalid for %qs", name); /* If a modifier is specified, the resulting complex is the complex form of TYPE. E.g, "complex short" is "complex short int". */ else if (type == integer_type_node) type = complex_integer_type_node; else if (type == float_type_node) type = complex_float_type_node; else if (type == double_type_node) type = complex_double_type_node; else if (type == long_double_type_node) type = complex_long_double_type_node; else type = build_complex_type (type); } /* If we're using the injected-class-name to form a compound type or a declaration, replace it with the underlying class so we don't get redundant typedefs in the debug output. But if we are returning the type unchanged, leave it alone so that it's available to maybe_get_template_decl_from_type_decl. 
*/ if (CLASS_TYPE_P (type) && DECL_SELF_REFERENCE_P (TYPE_NAME (type)) && type == TREE_TYPE (TYPE_NAME (type)) && (declarator || type_quals)) type = DECL_ORIGINAL_TYPE (TYPE_NAME (type)); type_quals |= cp_type_quals (type); type = cp_build_qualified_type_real (type, type_quals, ((((typedef_decl && !DECL_ARTIFICIAL (typedef_decl)) || declspecs->decltype_p) ? tf_ignore_bad_quals : 0) | tf_warning_or_error)); /* We might have ignored or rejected some of the qualifiers. */ type_quals = cp_type_quals (type); if (cxx_dialect >= cxx17 && type && is_auto (type) && innermost_code != cdk_function && id_declarator && declarator != id_declarator) if (tree tmpl = CLASS_PLACEHOLDER_TEMPLATE (type)) { error_at (typespec_loc, "template placeholder type %qT must be followed " "by a simple declarator-id", type); inform (DECL_SOURCE_LOCATION (tmpl), "%qD declared here", tmpl); type = error_mark_node; } staticp = 0; inlinep = decl_spec_seq_has_spec_p (declspecs, ds_inline); virtualp = decl_spec_seq_has_spec_p (declspecs, ds_virtual); explicitp = decl_spec_seq_has_spec_p (declspecs, ds_explicit); storage_class = declspecs->storage_class; if (storage_class == sc_static) staticp = 1 + (decl_context == FIELD); if (virtualp) { if (staticp == 2) { gcc_rich_location richloc (declspecs->locations[ds_virtual]); richloc.add_range (declspecs->locations[ds_storage_class]); error_at (&richloc, "member %qD cannot be declared both %<virtual%> " "and %<static%>", dname); storage_class = sc_none; staticp = 0; } if (constexpr_p && cxx_dialect < cxx20) { gcc_rich_location richloc (declspecs->locations[ds_virtual]); richloc.add_range (declspecs->locations[ds_constexpr]); pedwarn (&richloc, OPT_Wpedantic, "member %qD can be declared both " "%<virtual%> and %<constexpr%> only in %<-std=c++20%> or " "%<-std=gnu++20%>", dname); } } friendp = decl_spec_seq_has_spec_p (declspecs, ds_friend); /* Issue errors about use of storage classes for parameters. 
*/ if (decl_context == PARM) { if (typedef_p) { error_at (declspecs->locations[ds_typedef], "typedef declaration invalid in parameter declaration"); return error_mark_node; } else if (template_parm_flag && storage_class != sc_none) { error_at (min_location (declspecs->locations[ds_thread], declspecs->locations[ds_storage_class]), "storage class specified for template parameter %qs", name); return error_mark_node; } else if (storage_class == sc_static || storage_class == sc_extern || thread_p) { error_at (min_location (declspecs->locations[ds_thread], declspecs->locations[ds_storage_class]), "storage class specified for parameter %qs", name); return error_mark_node; } /* Function parameters cannot be concept. */ if (concept_p) { error_at (declspecs->locations[ds_concept], "a parameter cannot be declared %qs", "concept"); concept_p = 0; constexpr_p = 0; } /* Function parameters cannot be constexpr. If we saw one, moan and pretend it wasn't there. */ else if (constexpr_p) { error_at (declspecs->locations[ds_constexpr], "a parameter cannot be declared %qs", "constexpr"); constexpr_p = 0; } if (constinit_p) { error_at (declspecs->locations[ds_constinit], "a parameter cannot be declared %qs", "constinit"); constinit_p = 0; } if (consteval_p) { error_at (declspecs->locations[ds_consteval], "a parameter cannot be declared %qs", "consteval"); consteval_p = 0; } } /* Give error if `virtual' is used outside of class declaration. */ if (virtualp && (current_class_name == NULL_TREE || decl_context != FIELD)) { error_at (declspecs->locations[ds_virtual], "%<virtual%> outside class declaration"); virtualp = 0; } if (innermost_code == cdk_decomp) { location_t loc = (declarator->kind == cdk_reference ? 
declarator->declarator->id_loc : declarator->id_loc); if (inlinep) error_at (declspecs->locations[ds_inline], "structured binding declaration cannot be %qs", "inline"); if (typedef_p) error_at (declspecs->locations[ds_typedef], "structured binding declaration cannot be %qs", "typedef"); if (constexpr_p && !concept_p) error_at (declspecs->locations[ds_constexpr], "structured " "binding declaration cannot be %qs", "constexpr"); if (consteval_p) error_at (declspecs->locations[ds_consteval], "structured " "binding declaration cannot be %qs", "consteval"); if (thread_p && cxx_dialect < cxx20) pedwarn (declspecs->locations[ds_thread], 0, "structured binding declaration can be %qs only in " "%<-std=c++20%> or %<-std=gnu++20%>", declspecs->gnu_thread_keyword_p ? "__thread" : "thread_local"); if (concept_p) error_at (declspecs->locations[ds_concept], "structured binding declaration cannot be %qs", "concept"); /* [dcl.struct.bind] "A cv that includes volatile is deprecated." */ if (type_quals & TYPE_QUAL_VOLATILE) warning_at (declspecs->locations[ds_volatile], OPT_Wvolatile, "%<volatile%>-qualified structured binding is deprecated"); switch (storage_class) { case sc_none: break; case sc_register: error_at (loc, "structured binding declaration cannot be %qs", "register"); break; case sc_static: if (cxx_dialect < cxx20) pedwarn (loc, 0, "structured binding declaration can be %qs only in " "%<-std=c++20%> or %<-std=gnu++20%>", "static"); break; case sc_extern: error_at (loc, "structured binding declaration cannot be %qs", "extern"); break; case sc_mutable: error_at (loc, "structured binding declaration cannot be %qs", "mutable"); break; case sc_auto: error_at (loc, "structured binding declaration cannot be " "C++98 %<auto%>"); break; default: gcc_unreachable (); } if (TREE_CODE (type) != TEMPLATE_TYPE_PARM || TYPE_IDENTIFIER (type) != auto_identifier) { if (type != error_mark_node) { error_at (loc, "structured binding declaration cannot have " "type %qT", type); inform (loc, 
"type must be cv-qualified %<auto%> or reference to " "cv-qualified %<auto%>"); } type = build_qualified_type (make_auto (), type_quals); declspecs->type = type; } inlinep = 0; typedef_p = 0; constexpr_p = 0; consteval_p = 0; concept_p = 0; if (storage_class != sc_static) { storage_class = sc_none; declspecs->storage_class = sc_none; } } /* Static anonymous unions are dealt with here. */ if (staticp && decl_context == TYPENAME && declspecs->type && ANON_AGGR_TYPE_P (declspecs->type)) decl_context = FIELD; /* Warn about storage classes that are invalid for certain kinds of declarations (parameters, typenames, etc.). */ if (thread_p && ((storage_class && storage_class != sc_extern && storage_class != sc_static) || typedef_p)) { location_t loc = min_location (declspecs->locations[ds_thread], declspecs->locations[ds_storage_class]); error_at (loc, "multiple storage classes in declaration of %qs", name); thread_p = false; } if (decl_context != NORMAL && ((storage_class != sc_none && storage_class != sc_mutable) || thread_p)) { if ((decl_context == PARM || decl_context == CATCHPARM) && (storage_class == sc_register || storage_class == sc_auto)) ; else if (typedef_p) ; else if (decl_context == FIELD /* C++ allows static class elements. */ && storage_class == sc_static) /* C++ also allows inlines and signed and unsigned elements, but in those cases we don't come in here. */ ; else { location_t loc = min_location (declspecs->locations[ds_thread], declspecs->locations[ds_storage_class]); if (decl_context == FIELD) error_at (loc, "storage class specified for %qs", name); else if (decl_context == PARM || decl_context == CATCHPARM) error_at (loc, "storage class specified for parameter %qs", name); else error_at (loc, "storage class specified for typename"); if (storage_class == sc_register || storage_class == sc_auto || storage_class == sc_extern || thread_p) storage_class = sc_none; } } else if (storage_class == sc_extern && funcdef_flag && ! 
toplevel_bindings_p ()) error ("nested function %qs declared %<extern%>", name); else if (toplevel_bindings_p ()) { if (storage_class == sc_auto) error_at (declspecs->locations[ds_storage_class], "top-level declaration of %qs specifies %<auto%>", name); } else if (thread_p && storage_class != sc_extern && storage_class != sc_static) { if (declspecs->gnu_thread_keyword_p) pedwarn (declspecs->locations[ds_thread], 0, "function-scope %qs implicitly auto and " "declared %<__thread%>", name); /* When thread_local is applied to a variable of block scope the storage-class-specifier static is implied if it does not appear explicitly. */ storage_class = declspecs->storage_class = sc_static; staticp = 1; } if (storage_class && friendp) { error_at (min_location (declspecs->locations[ds_thread], declspecs->locations[ds_storage_class]), "storage class specifiers invalid in friend function " "declarations"); storage_class = sc_none; staticp = 0; } if (!id_declarator) unqualified_id = NULL_TREE; else { unqualified_id = id_declarator->u.id.unqualified_name; switch (TREE_CODE (unqualified_id)) { case BIT_NOT_EXPR: unqualified_id = TREE_OPERAND (unqualified_id, 0); if (TYPE_P (unqualified_id)) unqualified_id = constructor_name (unqualified_id); break; case IDENTIFIER_NODE: case TEMPLATE_ID_EXPR: break; default: gcc_unreachable (); } } if (declspecs->std_attributes) { location_t attr_loc = declspecs->locations[ds_std_attribute]; if (warning_at (attr_loc, OPT_Wattributes, "attribute ignored")) inform (attr_loc, "an attribute that appertains to a type-specifier " "is ignored"); } /* Determine the type of the entity declared by recurring on the declarator. 
*/ for (; declarator; declarator = declarator->declarator) { const cp_declarator *inner_declarator; tree attrs; if (type == error_mark_node) return error_mark_node; attrs = declarator->attributes; if (attrs) { int attr_flags; attr_flags = 0; if (declarator == NULL || declarator->kind == cdk_id) attr_flags |= (int) ATTR_FLAG_DECL_NEXT; if (declarator->kind == cdk_function) attr_flags |= (int) ATTR_FLAG_FUNCTION_NEXT; if (declarator->kind == cdk_array) attr_flags |= (int) ATTR_FLAG_ARRAY_NEXT; tree late_attrs = NULL_TREE; if (decl_context != PARM && decl_context != TYPENAME) /* Assume that any attributes that get applied late to templates will DTRT when applied to the declaration as a whole. */ late_attrs = splice_template_attributes (&attrs, type); returned_attrs = decl_attributes (&type, chainon (returned_attrs, attrs), attr_flags); returned_attrs = chainon (late_attrs, returned_attrs); } inner_declarator = declarator->declarator; /* We don't want to warn in parameter context because we don't yet know if the parse will succeed, and this might turn out to be a constructor call. */ if (decl_context != PARM && decl_context != TYPENAME && !typedef_p && declarator->parenthesized != UNKNOWN_LOCATION /* If the type is class-like and the inner name used a global namespace qualifier, we need the parens. Unfortunately all we can tell is whether a qualified name was used or not. */ && !(inner_declarator && inner_declarator->kind == cdk_id && inner_declarator->u.id.qualifying_scope && (MAYBE_CLASS_TYPE_P (type) || TREE_CODE (type) == ENUMERAL_TYPE))) warning_at (declarator->parenthesized, OPT_Wparentheses, "unnecessary parentheses in declaration of %qs", name); if (declarator->kind == cdk_id || declarator->kind == cdk_decomp) break; switch (declarator->kind) { case cdk_array: type = create_array_type_for_decl (dname, type, declarator->u.array.bounds, declarator->id_loc); if (!valid_array_size_p (dname ? 
declarator->id_loc : input_location, type, dname)) type = error_mark_node; if (declarator->std_attributes) /* [dcl.array]/1: The optional attribute-specifier-seq appertains to the array. */ returned_attrs = chainon (returned_attrs, declarator->std_attributes); break; case cdk_function: { tree arg_types; int funcdecl_p; /* Declaring a function type. */ { iloc_sentinel ils (declspecs->locations[ds_type_spec]); abstract_virtuals_error (ACU_RETURN, type); } /* Pick up type qualifiers which should be applied to `this'. */ memfn_quals = declarator->u.function.qualifiers; /* Pick up virt-specifiers. */ virt_specifiers = declarator->u.function.virt_specifiers; /* And ref-qualifier, too */ rqual = declarator->u.function.ref_qualifier; /* And tx-qualifier. */ tree tx_qual = declarator->u.function.tx_qualifier; /* Pick up the exception specifications. */ raises = declarator->u.function.exception_specification; /* If the exception-specification is ill-formed, let's pretend there wasn't one. */ if (raises == error_mark_node) raises = NULL_TREE; if (reqs) error_at (location_of (reqs), "requires-clause on return type"); reqs = declarator->u.function.requires_clause; /* Say it's a definition only for the CALL_EXPR closest to the identifier. */ funcdecl_p = inner_declarator && inner_declarator->kind == cdk_id; /* Handle a late-specified return type. */ tree late_return_type = declarator->u.function.late_return_type; if (tree auto_node = type_uses_auto (type)) { if (!late_return_type && funcdecl_p) { if (current_class_type && LAMBDA_TYPE_P (current_class_type)) /* OK for C++11 lambdas. 
*/; else if (cxx_dialect < cxx14) { error_at (typespec_loc, "%qs function uses " "%<auto%> type specifier without " "trailing return type", name); inform (typespec_loc, "deduced return type only available " "with %<-std=c++14%> or %<-std=gnu++14%>"); } else if (virtualp) { error_at (typespec_loc, "virtual function " "cannot have deduced return type"); virtualp = false; } } else if (!is_auto (type) && sfk != sfk_conversion) { error_at (typespec_loc, "%qs function with trailing " "return type has %qT as its type rather " "than plain %<auto%>", name, type); return error_mark_node; } else if (is_auto (type) && AUTO_IS_DECLTYPE (type)) { if (funcdecl_p) error_at (typespec_loc, "%qs function with trailing return type " "has %<decltype(auto)%> as its type " "rather than plain %<auto%>", name); else error_at (typespec_loc, "invalid use of %<decltype(auto)%>"); return error_mark_node; } tree tmpl = CLASS_PLACEHOLDER_TEMPLATE (auto_node); if (!tmpl) if (tree late_auto = type_uses_auto (late_return_type)) tmpl = CLASS_PLACEHOLDER_TEMPLATE (late_auto); if (tmpl && funcdecl_p) { if (!dguide_name_p (unqualified_id)) { error_at (declarator->id_loc, "deduced class " "type %qD in function return type", DECL_NAME (tmpl)); inform (DECL_SOURCE_LOCATION (tmpl), "%qD declared here", tmpl); return error_mark_node; } else if (!late_return_type) { error_at (declarator->id_loc, "deduction guide " "for %qT must have trailing return " "type", TREE_TYPE (tmpl)); inform (DECL_SOURCE_LOCATION (tmpl), "%qD declared here", tmpl); return error_mark_node; } else if (CLASS_TYPE_P (late_return_type) && CLASSTYPE_TEMPLATE_INFO (late_return_type) && (CLASSTYPE_TI_TEMPLATE (late_return_type) == tmpl)) /* OK */; else error ("trailing return type %qT of deduction guide " "is not a specialization of %qT", late_return_type, TREE_TYPE (tmpl)); } } else if (late_return_type && sfk != sfk_conversion) { if (late_return_type == error_mark_node) return error_mark_node; if (cxx_dialect < cxx11) /* Not using 
maybe_warn_cpp0x because this should always be an error. */ error_at (typespec_loc, "trailing return type only available " "with %<-std=c++11%> or %<-std=gnu++11%>"); else error_at (typespec_loc, "%qs function with trailing " "return type not declared with %<auto%> " "type specifier", name); return error_mark_node; } type = splice_late_return_type (type, late_return_type); if (type == error_mark_node) return error_mark_node; if (late_return_type) { late_return_type_p = true; type_quals = cp_type_quals (type); } if (type_quals != TYPE_UNQUALIFIED) { if (SCALAR_TYPE_P (type) || VOID_TYPE_P (type)) warning_at (typespec_loc, OPT_Wignored_qualifiers, "type " "qualifiers ignored on function return type"); /* [dcl.fct] "A volatile-qualified return type is deprecated." */ if (type_quals & TYPE_QUAL_VOLATILE) warning_at (typespec_loc, OPT_Wvolatile, "%<volatile%>-qualified return type is " "deprecated"); /* We now know that the TYPE_QUALS don't apply to the decl, but to its return type. */ type_quals = TYPE_UNQUALIFIED; } /* Error about some types functions can't return. */ if (TREE_CODE (type) == FUNCTION_TYPE) { error_at (typespec_loc, "%qs declared as function returning " "a function", name); return error_mark_node; } if (TREE_CODE (type) == ARRAY_TYPE) { error_at (typespec_loc, "%qs declared as function returning " "an array", name); return error_mark_node; } if (constinit_p) { error_at (declspecs->locations[ds_constinit], "%<constinit%> on function return type is not " "allowed"); return error_mark_node; } /* Only plain decltype(auto) is allowed. 
*/ if (tree a = type_uses_auto (type)) { if (AUTO_IS_DECLTYPE (a)) { if (a != type) { error_at (typespec_loc, "%qT as type rather than " "plain %<decltype(auto)%>", type); return error_mark_node; } else if (TYPE_QUALS (type) != TYPE_UNQUALIFIED) { error_at (typespec_loc, "%<decltype(auto)%> cannot be " "cv-qualified"); return error_mark_node; } } } if (ctype == NULL_TREE && decl_context == FIELD && funcdecl_p && friendp == 0) ctype = current_class_type; if (ctype && (sfk == sfk_constructor || sfk == sfk_destructor)) { /* We are within a class's scope. If our declarator name is the same as the class name, and we are defining a function, then it is a constructor/destructor, and therefore returns a void type. */ /* ISO C++ 12.4/2. A destructor may not be declared const or volatile. A destructor may not be static. A destructor may not be declared with ref-qualifier. ISO C++ 12.1. A constructor may not be declared const or volatile. A constructor may not be virtual. A constructor may not be static. A constructor may not be declared with ref-qualifier. */ if (staticp == 2) error_at (declspecs->locations[ds_storage_class], (flags == DTOR_FLAG) ? G_("destructor cannot be static member " "function") : G_("constructor cannot be static member " "function")); if (memfn_quals) { error ((flags == DTOR_FLAG) ? G_("destructors may not be cv-qualified") : G_("constructors may not be cv-qualified")); memfn_quals = TYPE_UNQUALIFIED; } if (rqual) { maybe_warn_cpp0x (CPP0X_REF_QUALIFIER); error ((flags == DTOR_FLAG) ? G_("destructors may not be ref-qualified") : G_("constructors may not be ref-qualified")); rqual = REF_QUAL_NONE; } if (decl_context == FIELD && !member_function_or_else (ctype, current_class_type, flags)) return error_mark_node; if (flags != DTOR_FLAG) { /* It's a constructor. 
*/ if (explicitp == 1) explicitp = 2; if (virtualp) { permerror (declspecs->locations[ds_virtual], "constructors cannot be declared %<virtual%>"); virtualp = 0; } if (decl_context == FIELD && sfk != sfk_constructor) return error_mark_node; } if (decl_context == FIELD) staticp = 0; } else if (friendp) { if (virtualp) { /* Cannot be both friend and virtual. */ gcc_rich_location richloc (declspecs->locations[ds_virtual]); richloc.add_range (declspecs->locations[ds_friend]); error_at (&richloc, "virtual functions cannot be friends"); friendp = 0; } if (decl_context == NORMAL) error_at (declarator->id_loc, "friend declaration not in class definition"); if (current_function_decl && funcdef_flag) { error_at (declarator->id_loc, "cannot define friend function %qs in a local " "class definition", name); friendp = 0; } /* [class.friend]/6: A function can be defined in a friend declaration if the function name is unqualified. */ if (funcdef_flag && in_namespace) { if (in_namespace == global_namespace) error_at (declarator->id_loc, "friend function definition %qs cannot have " "a name qualified with %<::%>", name); else error_at (declarator->id_loc, "friend function definition %qs cannot have " "a name qualified with %<%D::%>", name, in_namespace); } } else if (ctype && sfk == sfk_conversion) { if (explicitp == 1) { maybe_warn_cpp0x (CPP0X_EXPLICIT_CONVERSION); explicitp = 2; } if (late_return_type_p) error ("a conversion function cannot have a trailing return type"); } else if (sfk == sfk_deduction_guide) { if (explicitp == 1) explicitp = 2; } tree pushed_scope = NULL_TREE; if (funcdecl_p && decl_context != FIELD && inner_declarator->u.id.qualifying_scope && CLASS_TYPE_P (inner_declarator->u.id.qualifying_scope)) pushed_scope = push_scope (inner_declarator->u.id.qualifying_scope); arg_types = grokparms (declarator->u.function.parameters, &parms); if (pushed_scope) pop_scope (pushed_scope); if (inner_declarator && inner_declarator->kind == cdk_id && inner_declarator->u.id.sfk 
== sfk_destructor && arg_types != void_list_node) { error_at (declarator->id_loc, "destructors may not have parameters"); arg_types = void_list_node; parms = NULL_TREE; } type = build_function_type (type, arg_types); tree attrs = declarator->std_attributes; if (tx_qual) { tree att = build_tree_list (tx_qual, NULL_TREE); /* transaction_safe applies to the type, but transaction_safe_dynamic applies to the function. */ if (is_attribute_p ("transaction_safe", tx_qual)) attrs = chainon (attrs, att); else returned_attrs = chainon (returned_attrs, att); } if (attrs) /* [dcl.fct]/2: The optional attribute-specifier-seq appertains to the function type. */ cplus_decl_attributes (&type, attrs, 0); if (raises) type = build_exception_variant (type, raises); } break; case cdk_pointer: case cdk_reference: case cdk_ptrmem: /* Filter out pointers-to-references and references-to-references. We can get these if a TYPE_DECL is used. */ if (TYPE_REF_P (type)) { if (declarator->kind != cdk_reference) { error ("cannot declare pointer to %q#T", type); type = TREE_TYPE (type); } /* In C++0x, we allow reference to reference declarations that occur indirectly through typedefs [7.1.3/8 dcl.typedef] and template type arguments [14.3.1/4 temp.arg.type]. The check for direct reference to reference declarations, which are still forbidden, occurs below. Reasoning behind the change can be found in DR106, DR540, and the rvalue reference proposals. */ else if (cxx_dialect == cxx98) { error ("cannot declare reference to %q#T", type); type = TREE_TYPE (type); } } else if (VOID_TYPE_P (type)) { if (declarator->kind == cdk_reference) error ("cannot declare reference to %q#T", type); else if (declarator->kind == cdk_ptrmem) error ("cannot declare pointer to %q#T member", type); } /* We now know that the TYPE_QUALS don't apply to the decl, but to the target of the pointer. */ type_quals = TYPE_UNQUALIFIED; /* This code used to handle METHOD_TYPE, but I don't think it's possible to get it here anymore. 
*/ gcc_assert (TREE_CODE (type) != METHOD_TYPE); if (declarator->kind == cdk_ptrmem && TREE_CODE (type) == FUNCTION_TYPE) { memfn_quals |= type_memfn_quals (type); type = build_memfn_type (type, declarator->u.pointer.class_type, memfn_quals, rqual); if (type == error_mark_node) return error_mark_node; rqual = REF_QUAL_NONE; memfn_quals = TYPE_UNQUALIFIED; } if (TREE_CODE (type) == FUNCTION_TYPE && (type_memfn_quals (type) != TYPE_UNQUALIFIED || type_memfn_rqual (type) != REF_QUAL_NONE)) error (declarator->kind == cdk_reference ? G_("cannot declare reference to qualified function type %qT") : G_("cannot declare pointer to qualified function type %qT"), type); /* When the pointed-to type involves components of variable size, care must be taken to ensure that the size evaluation code is emitted early enough to dominate all the possible later uses and late enough for the variables on which it depends to have been assigned. This is expected to happen automatically when the pointed-to type has a name/declaration of it's own, but special attention is required if the type is anonymous. We handle the NORMAL and FIELD contexts here by inserting a dummy statement that just evaluates the size at a safe point and ensures it is not deferred until e.g. within a deeper conditional context (c++/43555). We expect nothing to be needed here for PARM or TYPENAME. Evaluating the size at this point for TYPENAME would actually be incorrect, as we might be in the middle of an expression with side effects on the pointed-to type size "arguments" prior to the pointer declaration point and the size evaluation could end up prior to the side effects. 
*/ if (!TYPE_NAME (type) && (decl_context == NORMAL || decl_context == FIELD) && at_function_scope_p () && variably_modified_type_p (type, NULL_TREE)) { TYPE_NAME (type) = build_decl (UNKNOWN_LOCATION, TYPE_DECL, NULL_TREE, type); add_decl_expr (TYPE_NAME (type)); } if (declarator->kind == cdk_reference) { /* In C++0x, the type we are creating a reference to might be a typedef which is itself a reference type. In that case, we follow the reference collapsing rules in [7.1.3/8 dcl.typedef] to create the final reference type: "If a typedef TD names a type that is a reference to a type T, an attempt to create the type 'lvalue reference to cv TD' creates the type 'lvalue reference to T,' while an attempt to create the type "rvalue reference to cv TD' creates the type TD." */ if (VOID_TYPE_P (type)) /* We already gave an error. */; else if (TYPE_REF_P (type)) { if (declarator->u.reference.rvalue_ref) /* Leave type alone. */; else type = cp_build_reference_type (TREE_TYPE (type), false); } else type = cp_build_reference_type (type, declarator->u.reference.rvalue_ref); /* In C++0x, we need this check for direct reference to reference declarations, which are forbidden by [8.3.2/5 dcl.ref]. Reference to reference declarations are only allowed indirectly through typedefs and template type arguments. Example: void foo(int & &); // invalid ref-to-ref decl typedef int & int_ref; void foo(int_ref &); // valid ref-to-ref decl */ if (inner_declarator && inner_declarator->kind == cdk_reference) error ("cannot declare reference to %q#T, which is not " "a typedef or a template type argument", type); } else if (TREE_CODE (type) == METHOD_TYPE) type = build_ptrmemfunc_type (build_pointer_type (type)); else if (declarator->kind == cdk_ptrmem) { gcc_assert (TREE_CODE (declarator->u.pointer.class_type) != NAMESPACE_DECL); if (declarator->u.pointer.class_type == error_mark_node) /* We will already have complained. 
*/ type = error_mark_node; else type = build_ptrmem_type (declarator->u.pointer.class_type, type); } else type = build_pointer_type (type); /* Process a list of type modifier keywords (such as const or volatile) that were given inside the `*' or `&'. */ if (declarator->u.pointer.qualifiers) { type = cp_build_qualified_type (type, declarator->u.pointer.qualifiers); type_quals = cp_type_quals (type); } /* Apply C++11 attributes to the pointer, and not to the type pointed to. This is unlike what is done for GNU attributes above. It is to comply with [dcl.ptr]/1: [the optional attribute-specifier-seq (7.6.1) appertains to the pointer and not to the object pointed to]. */ if (declarator->std_attributes) decl_attributes (&type, declarator->std_attributes, 0); ctype = NULL_TREE; break; case cdk_error: break; default: gcc_unreachable (); } } id_loc = declarator ? declarator->id_loc : input_location; /* A `constexpr' specifier used in an object declaration declares the object as `const'. */ if (constexpr_p && innermost_code != cdk_function) { /* DR1688 says that a `constexpr' specifier in combination with `volatile' is valid. */ if (!TYPE_REF_P (type)) { type_quals |= TYPE_QUAL_CONST; type = cp_build_qualified_type (type, type_quals); } } if (unqualified_id && TREE_CODE (unqualified_id) == TEMPLATE_ID_EXPR && !FUNC_OR_METHOD_TYPE_P (type) && !variable_template_p (TREE_OPERAND (unqualified_id, 0))) { error ("template-id %qD used as a declarator", unqualified_id); unqualified_id = dname; } /* If TYPE is a FUNCTION_TYPE, but the function name was explicitly qualified with a class-name, turn it into a METHOD_TYPE, unless we know that the function is static. We take advantage of this opportunity to do other processing that pertains to entities explicitly declared to be class members. Note that if DECLARATOR is non-NULL, we know it is a cdk_id declarator; otherwise, we would not have exited the loop above. 
*/ if (declarator && declarator->kind == cdk_id && declarator->u.id.qualifying_scope && MAYBE_CLASS_TYPE_P (declarator->u.id.qualifying_scope)) { ctype = declarator->u.id.qualifying_scope; ctype = TYPE_MAIN_VARIANT (ctype); template_count = num_template_headers_for_class (ctype); if (ctype == current_class_type) { if (friendp) { permerror (declspecs->locations[ds_friend], "member functions are implicitly " "friends of their class"); friendp = 0; } else permerror (id_loc, "extra qualification %<%T::%> on member %qs", ctype, name); } else if (/* If the qualifying type is already complete, then we can skip the following checks. */ !COMPLETE_TYPE_P (ctype) && (/* If the function is being defined, then qualifying type must certainly be complete. */ funcdef_flag /* A friend declaration of "T::f" is OK, even if "T" is a template parameter. But, if this function is not a friend, the qualifying type must be a class. */ || (!friendp && !CLASS_TYPE_P (ctype)) /* For a declaration, the type need not be complete, if either it is dependent (since there is no meaningful definition of complete in that case) or the qualifying class is currently being defined. */ || !(dependent_type_p (ctype) || currently_open_class (ctype))) /* Check that the qualifying type is complete. */ && !complete_type_or_else (ctype, NULL_TREE)) return error_mark_node; else if (TREE_CODE (type) == FUNCTION_TYPE) { if (current_class_type && (!friendp || funcdef_flag || initialized)) { error_at (id_loc, funcdef_flag || initialized ? G_("cannot define member function %<%T::%s%> " "within %qT") : G_("cannot declare member function %<%T::%s%> " "within %qT"), ctype, name, current_class_type); return error_mark_node; } } else if (typedef_p && current_class_type) { error_at (id_loc, "cannot declare member %<%T::%s%> within %qT", ctype, name, current_class_type); return error_mark_node; } } if (ctype == NULL_TREE && decl_context == FIELD && friendp == 0) ctype = current_class_type; /* Now TYPE has the actual type. 
*/ if (returned_attrs) { if (attrlist) *attrlist = chainon (returned_attrs, *attrlist); else attrlist = &returned_attrs; } if (declarator && declarator->kind == cdk_id && declarator->std_attributes && attrlist != NULL) { /* [dcl.meaning]/1: The optional attribute-specifier-seq following a declarator-id appertains to the entity that is declared. */ if (declarator->std_attributes != error_mark_node) *attrlist = chainon (*attrlist, declarator->std_attributes); else /* We should have already diagnosed the issue (c++/78344). */ gcc_assert (seen_error ()); } /* Handle parameter packs. */ if (parameter_pack_p) { if (decl_context == PARM) /* Turn the type into a pack expansion.*/ type = make_pack_expansion (type); else error ("non-parameter %qs cannot be a parameter pack", name); } if ((decl_context == FIELD || decl_context == PARM) && !processing_template_decl && variably_modified_type_p (type, NULL_TREE)) { if (decl_context == FIELD) error_at (id_loc, "data member may not have variably modified type %qT", type); else error_at (id_loc, "parameter may not have variably modified type %qT", type); type = error_mark_node; } if (explicitp == 1 || (explicitp && friendp)) { /* [dcl.fct.spec] (C++11) The explicit specifier shall be used only in the declaration of a constructor or conversion function within a class definition. 
*/ if (!current_class_type) error_at (declspecs->locations[ds_explicit], "%<explicit%> outside class declaration"); else if (friendp) error_at (declspecs->locations[ds_explicit], "%<explicit%> in friend declaration"); else error_at (declspecs->locations[ds_explicit], "only declarations of constructors and conversion operators " "can be %<explicit%>"); explicitp = 0; } if (storage_class == sc_mutable) { location_t sloc = declspecs->locations[ds_storage_class]; if (decl_context != FIELD || friendp) { error_at (sloc, "non-member %qs cannot be declared %<mutable%>", name); storage_class = sc_none; } else if (decl_context == TYPENAME || typedef_p) { error_at (sloc, "non-object member %qs cannot be declared %<mutable%>", name); storage_class = sc_none; } else if (FUNC_OR_METHOD_TYPE_P (type)) { error_at (sloc, "function %qs cannot be declared %<mutable%>", name); storage_class = sc_none; } else if (staticp) { error_at (sloc, "%<static%> %qs cannot be declared %<mutable%>", name); storage_class = sc_none; } else if (type_quals & TYPE_QUAL_CONST) { error_at (sloc, "%<const%> %qs cannot be declared %<mutable%>", name); storage_class = sc_none; } else if (TYPE_REF_P (type)) { permerror (sloc, "reference %qs cannot be declared %<mutable%>", name); storage_class = sc_none; } } /* If this is declaring a typedef name, return a TYPE_DECL. */ if (typedef_p && decl_context != TYPENAME) { bool alias_p = decl_spec_seq_has_spec_p (declspecs, ds_alias); tree decl; if (funcdef_flag) { if (decl_context == NORMAL) error_at (id_loc, "typedef may not be a function definition"); else error_at (id_loc, "typedef may not be a member function definition"); return error_mark_node; } /* This declaration: typedef void f(int) const; declares a function type which is not a member of any particular class, but which is cv-qualified; for example "f S::*" declares a pointer to a const-qualified member function of S. We record the cv-qualification in the function type. 
*/ if ((rqual || memfn_quals) && TREE_CODE (type) == FUNCTION_TYPE) { type = apply_memfn_quals (type, memfn_quals, rqual); /* We have now dealt with these qualifiers. */ memfn_quals = TYPE_UNQUALIFIED; rqual = REF_QUAL_NONE; } if (type_uses_auto (type)) { if (alias_p) error_at (declspecs->locations[ds_type_spec], "%<auto%> not allowed in alias declaration"); else error_at (declspecs->locations[ds_type_spec], "typedef declared %<auto%>"); type = error_mark_node; } if (reqs) error_at (location_of (reqs), "requires-clause on typedef"); if (id_declarator && declarator->u.id.qualifying_scope) { error_at (id_loc, "typedef name may not be a nested-name-specifier"); type = error_mark_node; } if (decl_context == FIELD) decl = build_lang_decl_loc (id_loc, TYPE_DECL, unqualified_id, type); else decl = build_decl (id_loc, TYPE_DECL, unqualified_id, type); if (decl_context != FIELD) { if (!current_function_decl) DECL_CONTEXT (decl) = FROB_CONTEXT (current_namespace); else if (DECL_MAYBE_IN_CHARGE_CDTOR_P (current_function_decl)) /* The TYPE_DECL is "abstract" because there will be clones of this constructor/destructor, and there will be copies of this TYPE_DECL generated in those clones. The decloning optimization (for space) may revert this subsequently if it determines that the clones should share a common implementation. */ DECL_ABSTRACT_P (decl) = true; } else if (current_class_type && constructor_name_p (unqualified_id, current_class_type)) permerror (id_loc, "ISO C++ forbids nested type %qD with same name " "as enclosing class", unqualified_id); /* If the user declares "typedef struct {...} foo" then the struct will have an anonymous name. Fill that name in now. Nothing can refer to it, so nothing needs know about the name change. 
*/ if (type != error_mark_node && unqualified_id && TYPE_NAME (type) && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL && TYPE_UNNAMED_P (type) && declspecs->type_definition_p && attributes_naming_typedef_ok (*attrlist) && cp_type_quals (type) == TYPE_UNQUALIFIED) name_unnamed_type (type, decl); if (signed_p || (typedef_decl && C_TYPEDEF_EXPLICITLY_SIGNED (typedef_decl))) C_TYPEDEF_EXPLICITLY_SIGNED (decl) = 1; bad_specifiers (decl, BSP_TYPE, virtualp, memfn_quals != TYPE_UNQUALIFIED, inlinep, friendp, raises != NULL_TREE, declspecs->locations); if (alias_p) /* Acknowledge that this was written: `using analias = atype;'. */ TYPE_DECL_ALIAS_P (decl) = 1; return decl; } /* Detect the case of an array type of unspecified size which came, as such, direct from a typedef name. We must copy the type, so that the array's domain can be individually set by the object's initializer. */ if (type && typedef_type && TREE_CODE (type) == ARRAY_TYPE && !TYPE_DOMAIN (type) && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (typedef_type)) type = build_cplus_array_type (TREE_TYPE (type), NULL_TREE); /* Detect where we're using a typedef of function type to declare a function. PARMS will not be set, so we must create it now. */ if (type == typedef_type && TREE_CODE (type) == FUNCTION_TYPE) { tree decls = NULL_TREE; tree args; for (args = TYPE_ARG_TYPES (type); args && args != void_list_node; args = TREE_CHAIN (args)) { tree decl = cp_build_parm_decl (NULL_TREE, NULL_TREE, TREE_VALUE (args)); DECL_CHAIN (decl) = decls; decls = decl; } parms = nreverse (decls); if (decl_context != TYPENAME) { /* The qualifiers on the function type become the qualifiers on the non-static member function. */ memfn_quals |= type_memfn_quals (type); rqual = type_memfn_rqual (type); type_quals = TYPE_UNQUALIFIED; raises = TYPE_RAISES_EXCEPTIONS (type); } } /* If this is a type name (such as, in a cast or sizeof), compute the type and return it now. 
*/ if (decl_context == TYPENAME) { /* Note that here we don't care about type_quals. */ /* Special case: "friend class foo" looks like a TYPENAME context. */ if (friendp) { if (inlinep) { error ("%<inline%> specified for friend class declaration"); inlinep = 0; } if (!current_aggr) { /* Don't allow friend declaration without a class-key. */ if (TREE_CODE (type) == TEMPLATE_TYPE_PARM) permerror (input_location, "template parameters cannot be friends"); else if (TREE_CODE (type) == TYPENAME_TYPE) permerror (input_location, "friend declaration requires class-key, " "i.e. %<friend class %T::%D%>", TYPE_CONTEXT (type), TYPENAME_TYPE_FULLNAME (type)); else permerror (input_location, "friend declaration requires class-key, " "i.e. %<friend %#T%>", type); } /* Only try to do this stuff if we didn't already give up. */ if (type != integer_type_node) { /* A friendly class? */ if (current_class_type) make_friend_class (current_class_type, TYPE_MAIN_VARIANT (type), /*complain=*/true); else error ("trying to make class %qT a friend of global scope", type); type = void_type_node; } } else if (memfn_quals || rqual) { if (ctype == NULL_TREE && TREE_CODE (type) == METHOD_TYPE) ctype = TYPE_METHOD_BASETYPE (type); if (ctype) type = build_memfn_type (type, ctype, memfn_quals, rqual); /* Core issue #547: need to allow this in template type args. Allow it in general in C++11 for alias-declarations. */ else if ((template_type_arg || cxx_dialect >= cxx11) && TREE_CODE (type) == FUNCTION_TYPE) type = apply_memfn_quals (type, memfn_quals, rqual); else error ("invalid qualifiers on non-member function type"); } if (reqs) error_at (location_of (reqs), "requires-clause on type-id"); return type; } else if (unqualified_id == NULL_TREE && decl_context != PARM && decl_context != CATCHPARM && TREE_CODE (type) != UNION_TYPE && ! 
bitfield && innermost_code != cdk_decomp) { error ("abstract declarator %qT used as declaration", type); return error_mark_node; } if (!FUNC_OR_METHOD_TYPE_P (type)) { /* Only functions may be declared using an operator-function-id. */ if (dname && IDENTIFIER_ANY_OP_P (dname)) { error_at (id_loc, "declaration of %qD as non-function", dname); return error_mark_node; } if (reqs) error_at (location_of (reqs), "requires-clause on declaration of non-function type %qT", type); } /* We don't check parameter types here because we can emit a better error message later. */ if (decl_context != PARM) { type = check_var_type (unqualified_id, type, id_loc); if (type == error_mark_node) return error_mark_node; } /* Now create the decl, which may be a VAR_DECL, a PARM_DECL or a FUNCTION_DECL, depending on DECL_CONTEXT and TYPE. */ if (decl_context == PARM || decl_context == CATCHPARM) { if (ctype || in_namespace) error ("cannot use %<::%> in parameter declaration"); tree auto_node = type_uses_auto (type); if (auto_node && !(cxx_dialect >= cxx17 && template_parm_flag)) { if (cxx_dialect >= cxx14) { if (decl_context == PARM && AUTO_IS_DECLTYPE (auto_node)) error_at (typespec_loc, "cannot declare a parameter with %<decltype(auto)%>"); else error_at (typespec_loc, "%<auto%> parameter not permitted in this context"); } else error_at (typespec_loc, "parameter declared %<auto%>"); type = error_mark_node; } /* A parameter declared as an array of T is really a pointer to T. One declared as a function is really a pointer to a function. One declared as a member is really a pointer to member. */ if (TREE_CODE (type) == ARRAY_TYPE) { /* Transfer const-ness of array into that of type pointed to. 
*/ type = build_pointer_type (TREE_TYPE (type)); type_quals = TYPE_UNQUALIFIED; array_parameter_p = true; } else if (TREE_CODE (type) == FUNCTION_TYPE) type = build_pointer_type (type); } if (ctype && TREE_CODE (type) == FUNCTION_TYPE && staticp < 2 && !(unqualified_id && identifier_p (unqualified_id) && IDENTIFIER_NEWDEL_OP_P (unqualified_id))) { cp_cv_quals real_quals = memfn_quals; if (cxx_dialect < cxx14 && constexpr_p && sfk != sfk_constructor && sfk != sfk_destructor) real_quals |= TYPE_QUAL_CONST; type = build_memfn_type (type, ctype, real_quals, rqual); } { tree decl = NULL_TREE; if (decl_context == PARM) { decl = cp_build_parm_decl (NULL_TREE, unqualified_id, type); DECL_ARRAY_PARAMETER_P (decl) = array_parameter_p; bad_specifiers (decl, BSP_PARM, virtualp, memfn_quals != TYPE_UNQUALIFIED, inlinep, friendp, raises != NULL_TREE, declspecs->locations); } else if (decl_context == FIELD) { if (!staticp && !friendp && TREE_CODE (type) != METHOD_TYPE) if (tree auto_node = type_uses_auto (type)) { location_t tloc = declspecs->locations[ds_type_spec]; if (CLASS_PLACEHOLDER_TEMPLATE (auto_node)) error_at (tloc, "invalid use of template-name %qE without an " "argument list", CLASS_PLACEHOLDER_TEMPLATE (auto_node)); else error_at (tloc, "non-static data member declared with " "placeholder %qT", auto_node); type = error_mark_node; } /* The C99 flexible array extension. */ if (!staticp && TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == NULL_TREE) { if (ctype && (TREE_CODE (ctype) == UNION_TYPE || TREE_CODE (ctype) == QUAL_UNION_TYPE)) { error_at (id_loc, "flexible array member in union"); type = error_mark_node; } else { /* Array is a flexible member. */ if (name) pedwarn (id_loc, OPT_Wpedantic, "ISO C++ forbids flexible array member %qs", name); else pedwarn (input_location, OPT_Wpedantic, "ISO C++ forbids flexible array members"); /* Flexible array member has a null domain. 
*/ type = build_cplus_array_type (TREE_TYPE (type), NULL_TREE); } } if (type == error_mark_node) { /* Happens when declaring arrays of sizes which are error_mark_node, for example. */ decl = NULL_TREE; } else if (in_namespace && !friendp) { /* Something like struct S { int N::j; }; */ error_at (id_loc, "invalid use of %<::%>"); return error_mark_node; } else if (FUNC_OR_METHOD_TYPE_P (type) && unqualified_id) { int publicp = 0; tree function_context; if (friendp == 0) { /* This should never happen in pure C++ (the check could be an assert). It could happen in Objective-C++ if someone writes invalid code that uses a function declaration for an instance variable or property (instance variables and properties are parsed as FIELD_DECLs, but they are part of an Objective-C class, not a C++ class). That code is invalid and is caught by this check. */ if (!ctype) { error ("declaration of function %qD in invalid context", unqualified_id); return error_mark_node; } /* ``A union may [ ... ] not [ have ] virtual functions.'' ARM 9.5 */ if (virtualp && TREE_CODE (ctype) == UNION_TYPE) { error_at (declspecs->locations[ds_virtual], "function %qD declared %<virtual%> inside a union", unqualified_id); return error_mark_node; } if (virtualp && identifier_p (unqualified_id) && IDENTIFIER_NEWDEL_OP_P (unqualified_id)) { error_at (declspecs->locations[ds_virtual], "%qD cannot be declared %<virtual%>, since it " "is always static", unqualified_id); virtualp = 0; } } /* Check that the name used for a destructor makes sense. 
*/ if (sfk == sfk_destructor) { tree uqname = id_declarator->u.id.unqualified_name; if (!ctype) { gcc_assert (friendp); error_at (id_loc, "expected qualified name in friend " "declaration for destructor %qD", uqname); return error_mark_node; } if (!check_dtor_name (ctype, TREE_OPERAND (uqname, 0))) { error_at (id_loc, "declaration of %qD as member of %qT", uqname, ctype); return error_mark_node; } if (concept_p) { error_at (declspecs->locations[ds_concept], "a destructor cannot be %qs", "concept"); return error_mark_node; } if (constexpr_p && cxx_dialect < cxx20) { error_at (declspecs->locations[ds_constexpr], "%<constexpr%> destructors only available" " with %<-std=c++20%> or %<-std=gnu++20%>"); return error_mark_node; } if (consteval_p) { error_at (declspecs->locations[ds_consteval], "a destructor cannot be %qs", "consteval"); return error_mark_node; } } else if (sfk == sfk_constructor && friendp && !ctype) { error ("expected qualified name in friend declaration " "for constructor %qD", id_declarator->u.id.unqualified_name); return error_mark_node; } if (sfk == sfk_constructor) if (concept_p) { error_at (declspecs->locations[ds_concept], "a constructor cannot be %<concept%>"); return error_mark_node; } if (concept_p) { error_at (declspecs->locations[ds_concept], "a concept cannot be a member function"); concept_p = false; } else if (consteval_p && identifier_p (unqualified_id) && IDENTIFIER_NEWDEL_OP_P (unqualified_id)) { error_at (declspecs->locations[ds_consteval], "%qD cannot be %qs", unqualified_id, "consteval"); consteval_p = false; } if (TREE_CODE (unqualified_id) == TEMPLATE_ID_EXPR) { tree tmpl = TREE_OPERAND (unqualified_id, 0); if (variable_template_p (tmpl)) { error_at (id_loc, "specialization of variable template " "%qD declared as function", tmpl); inform (DECL_SOURCE_LOCATION (tmpl), "variable template declared here"); return error_mark_node; } } /* Tell grokfndecl if it needs to set TREE_PUBLIC on the node. 
*/ function_context = (ctype != NULL_TREE ? decl_function_context (TYPE_MAIN_DECL (ctype)) : NULL_TREE); publicp = ((! friendp || ! staticp) && function_context == NULL_TREE); decl = grokfndecl (ctype, type, TREE_CODE (unqualified_id) != TEMPLATE_ID_EXPR ? unqualified_id : dname, parms, unqualified_id, declspecs, reqs, virtualp, flags, memfn_quals, rqual, raises, friendp ? -1 : 0, friendp, publicp, inlinep | (2 * constexpr_p) | (4 * concept_p) | (8 * consteval_p), initialized == SD_DELETED, sfk, funcdef_flag, late_return_type_p, template_count, in_namespace, attrlist, id_loc); decl = set_virt_specifiers (decl, virt_specifiers); if (decl == NULL_TREE) return error_mark_node; #if 0 /* This clobbers the attrs stored in `decl' from `attrlist'. */ /* The decl and setting of decl_attr is also turned off. */ decl = build_decl_attribute_variant (decl, decl_attr); #endif /* [class.conv.ctor] A constructor declared without the function-specifier explicit that can be called with a single parameter specifies a conversion from the type of its first parameter to the type of its class. Such a constructor is called a converting constructor. */ if (explicitp == 2) DECL_NONCONVERTING_P (decl) = 1; if (declspecs->explicit_specifier) store_explicit_specifier (decl, declspecs->explicit_specifier); } else if (!staticp && ((current_class_type && same_type_p (type, current_class_type)) || (!dependent_type_p (type) && !COMPLETE_TYPE_P (complete_type (type)) && (!complete_or_array_type_p (type) || initialized == SD_UNINITIALIZED)))) { if (TREE_CODE (type) != ARRAY_TYPE || !COMPLETE_TYPE_P (TREE_TYPE (type))) { if (unqualified_id) { error_at (id_loc, "field %qD has incomplete type %qT", unqualified_id, type); cxx_incomplete_type_inform (strip_array_types (type)); } else error ("name %qT has incomplete type", type); type = error_mark_node; decl = NULL_TREE; } } else if (!verify_type_context (input_location, staticp ? 
TCTX_STATIC_STORAGE : TCTX_FIELD, type)) { type = error_mark_node; decl = NULL_TREE; } else { if (friendp) { if (unqualified_id) error_at (id_loc, "%qE is neither function nor member function; " "cannot be declared friend", unqualified_id); else error ("unnamed field is neither function nor member " "function; cannot be declared friend"); return error_mark_node; } decl = NULL_TREE; } if (friendp) { /* Friends are treated specially. */ if (ctype == current_class_type) ; /* We already issued a permerror. */ else if (decl && DECL_NAME (decl)) { if (initialized) /* Kludge: We need funcdef_flag to be true in do_friend for in-class defaulted functions, but that breaks grokfndecl. So set it here. */ funcdef_flag = true; if (template_class_depth (current_class_type) == 0) { decl = check_explicit_specialization (unqualified_id, decl, template_count, 2 * funcdef_flag + 4); if (decl == error_mark_node) return error_mark_node; } decl = do_friend (ctype, unqualified_id, decl, *attrlist, flags, funcdef_flag); return decl; } else return error_mark_node; } /* Structure field. It may not be a function, except for C++. */ if (decl == NULL_TREE) { if (staticp) { /* C++ allows static class members. All other work for this is done by grokfield. 
*/ decl = build_lang_decl_loc (id_loc, VAR_DECL, unqualified_id, type); set_linkage_for_static_data_member (decl); if (concept_p) error_at (declspecs->locations[ds_concept], "static data member %qE declared %qs", unqualified_id, "concept"); else if (constexpr_p && !initialized) { error_at (DECL_SOURCE_LOCATION (decl), "%<constexpr%> static data member %qD must " "have an initializer", decl); constexpr_p = false; } if (consteval_p) error_at (declspecs->locations[ds_consteval], "static data member %qE declared %qs", unqualified_id, "consteval"); if (inlinep) mark_inline_variable (decl, declspecs->locations[ds_inline]); if (!DECL_VAR_DECLARED_INLINE_P (decl) && !(cxx_dialect >= cxx17 && constexpr_p)) /* Even if there is an in-class initialization, DECL is considered undefined until an out-of-class definition is provided, unless this is an inline variable. */ DECL_EXTERNAL (decl) = 1; if (thread_p) { CP_DECL_THREAD_LOCAL_P (decl) = true; if (!processing_template_decl) set_decl_tls_model (decl, decl_default_tls_model (decl)); if (declspecs->gnu_thread_keyword_p) SET_DECL_GNU_TLS_P (decl); } } else { if (concept_p) { error_at (declspecs->locations[ds_concept], "non-static data member %qE declared %qs", unqualified_id, "concept"); concept_p = false; constexpr_p = false; } else if (constexpr_p) { error_at (declspecs->locations[ds_constexpr], "non-static data member %qE declared %qs", unqualified_id, "constexpr"); constexpr_p = false; } if (constinit_p) { error_at (declspecs->locations[ds_constinit], "non-static data member %qE declared %qs", unqualified_id, "constinit"); constinit_p = false; } if (consteval_p) { error_at (declspecs->locations[ds_consteval], "non-static data member %qE declared %qs", unqualified_id, "consteval"); consteval_p = false; } decl = build_decl (id_loc, FIELD_DECL, unqualified_id, type); DECL_NONADDRESSABLE_P (decl) = bitfield; if (bitfield && !unqualified_id) { TREE_NO_WARNING (decl) = 1; DECL_PADDING_P (decl) = 1; } if (storage_class == 
sc_mutable) { DECL_MUTABLE_P (decl) = 1; storage_class = sc_none; } if (initialized) { /* An attempt is being made to initialize a non-static member. This is new in C++11. */ maybe_warn_cpp0x (CPP0X_NSDMI); /* If this has been parsed with static storage class, but errors forced staticp to be cleared, ensure NSDMI is not present. */ if (declspecs->storage_class == sc_static) DECL_INITIAL (decl) = error_mark_node; } } bad_specifiers (decl, BSP_FIELD, virtualp, memfn_quals != TYPE_UNQUALIFIED, staticp ? false : inlinep, friendp, raises != NULL_TREE, declspecs->locations); } } else if (FUNC_OR_METHOD_TYPE_P (type)) { tree original_name; int publicp = 0; if (!unqualified_id) return error_mark_node; if (TREE_CODE (unqualified_id) == TEMPLATE_ID_EXPR) original_name = dname; else original_name = unqualified_id; // FIXME:gcc_assert (original_name == dname); if (storage_class == sc_auto) error_at (declspecs->locations[ds_storage_class], "storage class %<auto%> invalid for function %qs", name); else if (storage_class == sc_register) error_at (declspecs->locations[ds_storage_class], "storage class %<register%> invalid for function %qs", name); else if (thread_p) { if (declspecs->gnu_thread_keyword_p) error_at (declspecs->locations[ds_thread], "storage class %<__thread%> invalid for function %qs", name); else error_at (declspecs->locations[ds_thread], "storage class %<thread_local%> invalid for " "function %qs", name); } if (virt_specifiers) error ("virt-specifiers in %qs not allowed outside a class " "definition", name); /* Function declaration not at top level. Storage classes other than `extern' are not allowed and `extern' makes no difference. */ if (! 
toplevel_bindings_p () && (storage_class == sc_static || decl_spec_seq_has_spec_p (declspecs, ds_inline)) && pedantic) { if (storage_class == sc_static) pedwarn (declspecs->locations[ds_storage_class], OPT_Wpedantic, "%<static%> specifier invalid for function %qs " "declared out of global scope", name); else pedwarn (declspecs->locations[ds_inline], OPT_Wpedantic, "%<inline%> specifier invalid for function %qs " "declared out of global scope", name); } if (ctype == NULL_TREE) { if (virtualp) { error ("virtual non-class function %qs", name); virtualp = 0; } else if (sfk == sfk_constructor || sfk == sfk_destructor) { error (funcdef_flag ? G_("%qs defined in a non-class scope") : G_("%qs declared in a non-class scope"), name); sfk = sfk_none; } } if (consteval_p && identifier_p (unqualified_id) && IDENTIFIER_NEWDEL_OP_P (unqualified_id)) { error_at (declspecs->locations[ds_consteval], "%qD cannot be %qs", unqualified_id, "consteval"); consteval_p = false; } /* Record whether the function is public. */ publicp = (ctype != NULL_TREE || storage_class != sc_static); decl = grokfndecl (ctype, type, original_name, parms, unqualified_id, declspecs, reqs, virtualp, flags, memfn_quals, rqual, raises, 1, friendp, publicp, inlinep | (2 * constexpr_p) | (4 * concept_p) | (8 * consteval_p), initialized == SD_DELETED, sfk, funcdef_flag, late_return_type_p, template_count, in_namespace, attrlist, id_loc); if (decl == NULL_TREE) return error_mark_node; if (explicitp == 2) DECL_NONCONVERTING_P (decl) = 1; if (staticp == 1) { int invalid_static = 0; /* Don't allow a static member function in a class, and forbid declaring main to be static. */ if (TREE_CODE (type) == METHOD_TYPE) { permerror (input_location, "cannot declare member function %qD to have " "static linkage", decl); invalid_static = 1; } else if (current_function_decl) { /* 7.1.1: There can be no static function declarations within a block. 
*/ error_at (declspecs->locations[ds_storage_class], "cannot declare static function inside another function"); invalid_static = 1; } if (invalid_static) { staticp = 0; storage_class = sc_none; } } } else { /* It's a variable. */ /* An uninitialized decl with `extern' is a reference. */ decl = grokvardecl (type, dname, unqualified_id, declspecs, initialized, type_quals, inlinep, concept_p, template_count, ctype ? ctype : in_namespace, id_loc); if (decl == NULL_TREE) return error_mark_node; bad_specifiers (decl, BSP_VAR, virtualp, memfn_quals != TYPE_UNQUALIFIED, inlinep, friendp, raises != NULL_TREE, declspecs->locations); if (ctype) { DECL_CONTEXT (decl) = ctype; if (staticp == 1) { permerror (declspecs->locations[ds_storage_class], "%<static%> may not be used when defining " "(as opposed to declaring) a static data member"); staticp = 0; storage_class = sc_none; } if (storage_class == sc_register && TREE_STATIC (decl)) { error ("static member %qD declared %<register%>", decl); storage_class = sc_none; } if (storage_class == sc_extern && pedantic) { pedwarn (input_location, OPT_Wpedantic, "cannot explicitly declare member %q#D to have " "extern linkage", decl); storage_class = sc_none; } } else if (constexpr_p && DECL_EXTERNAL (decl)) { error_at (DECL_SOURCE_LOCATION (decl), "declaration of %<constexpr%> variable %qD " "is not a definition", decl); constexpr_p = false; } if (consteval_p) { error_at (DECL_SOURCE_LOCATION (decl), "a variable cannot be declared %<consteval%>"); consteval_p = false; } if (inlinep) mark_inline_variable (decl, declspecs->locations[ds_inline]); if (innermost_code == cdk_decomp) { gcc_assert (declarator && declarator->kind == cdk_decomp); DECL_SOURCE_LOCATION (decl) = id_loc; DECL_ARTIFICIAL (decl) = 1; fit_decomposition_lang_decl (decl, NULL_TREE); } } if (VAR_P (decl) && !initialized) if (tree auto_node = type_uses_auto (type)) if (!CLASS_PLACEHOLDER_TEMPLATE (auto_node)) { location_t loc = declspecs->locations[ds_type_spec]; error_at 
(loc, "declaration of %q#D has no initializer", decl); TREE_TYPE (decl) = error_mark_node; } if (storage_class == sc_extern && initialized && !funcdef_flag) { if (toplevel_bindings_p ()) { /* It's common practice (and completely valid) to have a const be initialized and declared extern. */ if (!(type_quals & TYPE_QUAL_CONST)) warning_at (DECL_SOURCE_LOCATION (decl), 0, "%qs initialized and declared %<extern%>", name); } else { error_at (DECL_SOURCE_LOCATION (decl), "%qs has both %<extern%> and initializer", name); return error_mark_node; } } /* Record `register' declaration for warnings on & and in case doing stupid register allocation. */ if (storage_class == sc_register) { DECL_REGISTER (decl) = 1; /* Warn about register storage specifiers on PARM_DECLs. */ if (TREE_CODE (decl) == PARM_DECL) { if (cxx_dialect >= cxx17) pedwarn (DECL_SOURCE_LOCATION (decl), OPT_Wregister, "ISO C++17 does not allow %<register%> storage " "class specifier"); else warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wregister, "%<register%> storage class specifier used"); } } else if (storage_class == sc_extern) DECL_THIS_EXTERN (decl) = 1; else if (storage_class == sc_static) DECL_THIS_STATIC (decl) = 1; if (VAR_P (decl)) { /* Set constexpr flag on vars (functions got it in grokfndecl). */ if (constexpr_p) DECL_DECLARED_CONSTEXPR_P (decl) = true; /* And the constinit flag (which only applies to variables). */ else if (constinit_p) DECL_DECLARED_CONSTINIT_P (decl) = true; } /* Record constancy and volatility on the DECL itself . There's no need to do this when processing a template; we'll do this for the instantiated declaration based on the type of DECL. */ if (!processing_template_decl) cp_apply_type_quals_to_decl (type_quals, decl); return decl; } } /* Subroutine of start_function. Ensure that each of the parameter types (as listed in PARMS) is complete, as is required for a function definition. 
*/ static void require_complete_types_for_parms (tree parms) { for (; parms; parms = DECL_CHAIN (parms)) { if (dependent_type_p (TREE_TYPE (parms))) continue; if (!VOID_TYPE_P (TREE_TYPE (parms)) && complete_type_or_else (TREE_TYPE (parms), parms)) { relayout_decl (parms); DECL_ARG_TYPE (parms) = type_passed_as (TREE_TYPE (parms)); maybe_warn_parm_abi (TREE_TYPE (parms), DECL_SOURCE_LOCATION (parms)); } else /* grokparms or complete_type_or_else will have already issued an error. */ TREE_TYPE (parms) = error_mark_node; } } /* Returns nonzero if T is a local variable. */ int local_variable_p (const_tree t) { if ((VAR_P (t) && (DECL_LOCAL_DECL_P (t) || !DECL_CONTEXT (t) || TREE_CODE (DECL_CONTEXT (t)) == FUNCTION_DECL)) || (TREE_CODE (t) == PARM_DECL)) return 1; return 0; } /* Like local_variable_p, but suitable for use as a tree-walking function. */ static tree local_variable_p_walkfn (tree *tp, int *walk_subtrees, void * /*data*/) { if (local_variable_p (*tp) && (!DECL_ARTIFICIAL (*tp) || DECL_NAME (*tp) == this_identifier)) return *tp; else if (TYPE_P (*tp)) *walk_subtrees = 0; return NULL_TREE; } /* Check that ARG, which is a default-argument expression for a parameter DECL, is valid. Returns ARG, or ERROR_MARK_NODE, if something goes wrong. DECL may also be a _TYPE node, rather than a DECL, if there is no DECL available. */ tree check_default_argument (tree decl, tree arg, tsubst_flags_t complain) { tree var; tree decl_type; if (TREE_CODE (arg) == DEFERRED_PARSE) /* We get a DEFERRED_PARSE when looking at an in-class declaration with a default argument. Ignore the argument for now; we'll deal with it after the class is complete. */ return arg; if (TYPE_P (decl)) { decl_type = decl; decl = NULL_TREE; } else decl_type = TREE_TYPE (decl); if (arg == error_mark_node || decl == error_mark_node || TREE_TYPE (arg) == error_mark_node || decl_type == error_mark_node) /* Something already went wrong. There's no need to check further. 
*/ return error_mark_node; /* [dcl.fct.default] A default argument expression is implicitly converted to the parameter type. */ ++cp_unevaluated_operand; /* Avoid digest_init clobbering the initializer. */ tree carg = BRACE_ENCLOSED_INITIALIZER_P (arg) ? unshare_expr (arg): arg; perform_implicit_conversion_flags (decl_type, carg, complain, LOOKUP_IMPLICIT); --cp_unevaluated_operand; /* Avoid redundant -Wzero-as-null-pointer-constant warnings at the call sites. */ if (TYPE_PTR_OR_PTRMEM_P (decl_type) && null_ptr_cst_p (arg) /* Don't lose side-effects as in PR90473. */ && !TREE_SIDE_EFFECTS (arg)) return nullptr_node; /* [dcl.fct.default] Local variables shall not be used in default argument expressions. The keyword `this' shall not be used in a default argument of a member function. */ var = cp_walk_tree_without_duplicates (&arg, local_variable_p_walkfn, NULL); if (var) { if (complain & tf_warning_or_error) { if (DECL_NAME (var) == this_identifier) permerror (input_location, "default argument %qE uses %qD", arg, var); else error ("default argument %qE uses local variable %qD", arg, var); } return error_mark_node; } /* All is well. */ return arg; } /* Returns a deprecated type used within TYPE, or NULL_TREE if none. */ static tree type_is_deprecated (tree type) { enum tree_code code; if (TREE_DEPRECATED (type)) return type; if (TYPE_NAME (type)) { if (TREE_DEPRECATED (TYPE_NAME (type))) return type; else { cp_warn_deprecated_use_scopes (CP_DECL_CONTEXT (TYPE_NAME (type))); return NULL_TREE; } } /* Do warn about using typedefs to a deprecated class. 
*/ if (OVERLOAD_TYPE_P (type) && type != TYPE_MAIN_VARIANT (type)) return type_is_deprecated (TYPE_MAIN_VARIANT (type)); code = TREE_CODE (type); if (code == POINTER_TYPE || code == REFERENCE_TYPE || code == OFFSET_TYPE || code == FUNCTION_TYPE || code == METHOD_TYPE || code == ARRAY_TYPE) return type_is_deprecated (TREE_TYPE (type)); if (TYPE_PTRMEMFUNC_P (type)) return type_is_deprecated (TREE_TYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (type)))); return NULL_TREE; } /* Decode the list of parameter types for a function type. Given the list of things declared inside the parens, return a list of types. If this parameter does not end with an ellipsis, we append void_list_node. *PARMS is set to the chain of PARM_DECLs created. */ tree grokparms (tree parmlist, tree *parms) { tree result = NULL_TREE; tree decls = NULL_TREE; tree parm; int any_error = 0; for (parm = parmlist; parm != NULL_TREE; parm = TREE_CHAIN (parm)) { tree type = NULL_TREE; tree init = TREE_PURPOSE (parm); tree decl = TREE_VALUE (parm); if (parm == void_list_node) break; if (! decl || TREE_TYPE (decl) == error_mark_node) { any_error = 1; continue; } type = TREE_TYPE (decl); if (VOID_TYPE_P (type)) { if (same_type_p (type, void_type_node) && !init && !DECL_NAME (decl) && !result && TREE_CHAIN (parm) == void_list_node) /* DR 577: A parameter list consisting of a single unnamed parameter of non-dependent type 'void'. */ break; else if (cv_qualified_p (type)) error_at (DECL_SOURCE_LOCATION (decl), "invalid use of cv-qualified type %qT in " "parameter declaration", type); else error_at (DECL_SOURCE_LOCATION (decl), "invalid use of type %<void%> in parameter " "declaration"); /* It's not a good idea to actually create parameters of type `void'; other parts of the compiler assume that a void type terminates the parameter list. 
*/ type = error_mark_node; TREE_TYPE (decl) = error_mark_node; } if (type != error_mark_node) { if (deprecated_state != DEPRECATED_SUPPRESS) { tree deptype = type_is_deprecated (type); if (deptype) cp_warn_deprecated_use (deptype); } /* [dcl.fct] "A parameter with volatile-qualified type is deprecated." */ if (CP_TYPE_VOLATILE_P (type)) warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wvolatile, "%<volatile%>-qualified parameter is " "deprecated"); /* Top-level qualifiers on the parameters are ignored for function types. */ type = cp_build_qualified_type (type, 0); if (TREE_CODE (type) == METHOD_TYPE) { error ("parameter %qD invalidly declared method type", decl); type = build_pointer_type (type); TREE_TYPE (decl) = type; } else if (abstract_virtuals_error (decl, type)) /* Ignore any default argument. */ init = NULL_TREE; else if (cxx_dialect < cxx17 && INDIRECT_TYPE_P (type)) { /* Before C++17 DR 393: [dcl.fct]/6, parameter types cannot contain pointers (references) to arrays of unknown bound. */ tree t = TREE_TYPE (type); int ptr = TYPE_PTR_P (type); while (1) { if (TYPE_PTR_P (t)) ptr = 1; else if (TREE_CODE (t) != ARRAY_TYPE) break; else if (!TYPE_DOMAIN (t)) break; t = TREE_TYPE (t); } if (TREE_CODE (t) == ARRAY_TYPE) pedwarn (DECL_SOURCE_LOCATION (decl), OPT_Wpedantic, ptr ? G_("parameter %qD includes pointer to array of " "unknown bound %qT") : G_("parameter %qD includes reference to array of " "unknown bound %qT"), decl, t); } if (init && !processing_template_decl) init = check_default_argument (decl, init, tf_warning_or_error); } DECL_CHAIN (decl) = decls; decls = decl; result = tree_cons (init, type, result); } decls = nreverse (decls); result = nreverse (result); if (parm) result = chainon (result, void_list_node); *parms = decls; if (any_error) result = NULL_TREE; if (any_error) /* We had parm errors, recover by giving the function (...) type. */ result = NULL_TREE; return result; } /* D is a constructor or overloaded `operator='. 
Let T be the class in which D is declared. Then, this function returns: -1 if D's is an ill-formed constructor or copy assignment operator whose first parameter is of type `T'. 0 if D is not a copy constructor or copy assignment operator. 1 if D is a copy constructor or copy assignment operator whose first parameter is a reference to non-const qualified T. 2 if D is a copy constructor or copy assignment operator whose first parameter is a reference to const qualified T. This function can be used as a predicate. Positive values indicate a copy constructor and nonzero values indicate a copy assignment operator. */ int copy_fn_p (const_tree d) { tree args; tree arg_type; int result = 1; gcc_assert (DECL_FUNCTION_MEMBER_P (d)); if (TREE_CODE (d) == TEMPLATE_DECL || (DECL_TEMPLATE_INFO (d) && DECL_MEMBER_TEMPLATE_P (DECL_TI_TEMPLATE (d)))) /* Instantiations of template member functions are never copy functions. Note that member functions of templated classes are represented as template functions internally, and we must accept those as copy functions. */ return 0; if (!DECL_CONSTRUCTOR_P (d) && DECL_NAME (d) != assign_op_identifier) return 0; args = FUNCTION_FIRST_USER_PARMTYPE (d); if (!args) return 0; arg_type = TREE_VALUE (args); if (arg_type == error_mark_node) return 0; if (TYPE_MAIN_VARIANT (arg_type) == DECL_CONTEXT (d)) { /* Pass by value copy assignment operator. */ result = -1; } else if (TYPE_REF_P (arg_type) && !TYPE_REF_IS_RVALUE (arg_type) && TYPE_MAIN_VARIANT (TREE_TYPE (arg_type)) == DECL_CONTEXT (d)) { if (CP_TYPE_CONST_P (TREE_TYPE (arg_type))) result = 2; } else return 0; args = TREE_CHAIN (args); if (args && args != void_list_node && !TREE_PURPOSE (args)) /* There are more non-optional args. */ return 0; return result; } /* D is a constructor or overloaded `operator='. Let T be the class in which D is declared. Then, this function returns true when D is a move constructor or move assignment operator, false otherwise. 
*/ bool move_fn_p (const_tree d) { gcc_assert (DECL_FUNCTION_MEMBER_P (d)); if (cxx_dialect == cxx98) /* There are no move constructors if we are in C++98 mode. */ return false; if (TREE_CODE (d) == TEMPLATE_DECL || (DECL_TEMPLATE_INFO (d) && DECL_MEMBER_TEMPLATE_P (DECL_TI_TEMPLATE (d)))) /* Instantiations of template member functions are never move functions. Note that member functions of templated classes are represented as template functions internally, and we must accept those as move functions. */ return 0; return move_signature_fn_p (d); } /* D is a constructor or overloaded `operator='. Then, this function returns true when D has the same signature as a move constructor or move assignment operator (because either it is such a ctor/op= or it is a template specialization with the same signature), false otherwise. */ bool move_signature_fn_p (const_tree d) { tree args; tree arg_type; bool result = false; if (!DECL_CONSTRUCTOR_P (d) && DECL_NAME (d) != assign_op_identifier) return 0; args = FUNCTION_FIRST_USER_PARMTYPE (d); if (!args) return 0; arg_type = TREE_VALUE (args); if (arg_type == error_mark_node) return 0; if (TYPE_REF_P (arg_type) && TYPE_REF_IS_RVALUE (arg_type) && same_type_p (TYPE_MAIN_VARIANT (TREE_TYPE (arg_type)), DECL_CONTEXT (d))) result = true; args = TREE_CHAIN (args); if (args && args != void_list_node && !TREE_PURPOSE (args)) /* There are more non-optional args. */ return false; return result; } /* Remember any special properties of member function DECL. 
*/ void grok_special_member_properties (tree decl) { tree class_type; if (TREE_CODE (decl) == USING_DECL || !DECL_NONSTATIC_MEMBER_FUNCTION_P (decl)) return; class_type = DECL_CONTEXT (decl); if (IDENTIFIER_CTOR_P (DECL_NAME (decl))) { int ctor = copy_fn_p (decl); if (!DECL_ARTIFICIAL (decl)) TYPE_HAS_USER_CONSTRUCTOR (class_type) = 1; if (ctor > 0) { /* [class.copy] A non-template constructor for class X is a copy constructor if its first parameter is of type X&, const X&, volatile X& or const volatile X&, and either there are no other parameters or else all other parameters have default arguments. */ TYPE_HAS_COPY_CTOR (class_type) = 1; if (ctor > 1) TYPE_HAS_CONST_COPY_CTOR (class_type) = 1; } else if (sufficient_parms_p (FUNCTION_FIRST_USER_PARMTYPE (decl))) TYPE_HAS_DEFAULT_CONSTRUCTOR (class_type) = 1; else if (is_list_ctor (decl)) TYPE_HAS_LIST_CTOR (class_type) = 1; if (DECL_DECLARED_CONSTEXPR_P (decl) && !ctor && !move_fn_p (decl)) TYPE_HAS_CONSTEXPR_CTOR (class_type) = 1; } else if (DECL_NAME (decl) == assign_op_identifier) { /* [class.copy] A non-template assignment operator for class X is a copy assignment operator if its parameter is of type X, X&, const X&, volatile X& or const volatile X&. */ int assop = copy_fn_p (decl); if (assop) { TYPE_HAS_COPY_ASSIGN (class_type) = 1; if (assop != 1) TYPE_HAS_CONST_COPY_ASSIGN (class_type) = 1; } } else if (IDENTIFIER_CONV_OP_P (DECL_NAME (decl))) TYPE_HAS_CONVERSION (class_type) = true; /* Destructors are handled in check_methods. */ } /* Check a constructor DECL has the correct form. Complains if the class has a constructor of the form X(X). */ bool grok_ctor_properties (const_tree ctype, const_tree decl) { int ctor_parm = copy_fn_p (decl); if (ctor_parm < 0) { /* [class.copy] A declaration of a constructor for a class X is ill-formed if its first parameter is of type (optionally cv-qualified) X and either there are no other parameters or else all other parameters have default arguments. 
We *don't* complain about member template instantiations that have this form, though; they can occur as we try to decide what constructor to use during overload resolution. Since overload resolution will never prefer such a constructor to the non-template copy constructor (which is either explicitly or implicitly defined), there's no need to worry about their existence. Theoretically, they should never even be instantiated, but that's hard to forestall. */ error_at (DECL_SOURCE_LOCATION (decl), "invalid constructor; you probably meant %<%T (const %T&)%>", ctype, ctype); return false; } return true; } /* DECL is a declaration for an overloaded or conversion operator. If COMPLAIN is true, errors are issued for invalid declarations. */ bool grok_op_properties (tree decl, bool complain) { tree argtypes = TYPE_ARG_TYPES (TREE_TYPE (decl)); bool methodp = TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE; tree name = DECL_NAME (decl); location_t loc = DECL_SOURCE_LOCATION (decl); tree class_type = DECL_CONTEXT (decl); if (class_type && !CLASS_TYPE_P (class_type)) class_type = NULL_TREE; tree_code operator_code; unsigned op_flags; if (IDENTIFIER_CONV_OP_P (name)) { /* Conversion operators are TYPE_EXPR for the purposes of this function. */ operator_code = TYPE_EXPR; op_flags = OVL_OP_FLAG_UNARY; } else { const ovl_op_info_t *ovl_op = IDENTIFIER_OVL_OP_INFO (name); operator_code = ovl_op->tree_code; op_flags = ovl_op->flags; gcc_checking_assert (operator_code != ERROR_MARK); DECL_OVERLOADED_OPERATOR_CODE_RAW (decl) = ovl_op->ovl_op_code; } if (op_flags & OVL_OP_FLAG_ALLOC) { /* operator new and operator delete are quite special. 
*/ if (class_type) switch (op_flags) { case OVL_OP_FLAG_ALLOC: TYPE_HAS_NEW_OPERATOR (class_type) = 1; break; case OVL_OP_FLAG_ALLOC | OVL_OP_FLAG_DELETE: TYPE_GETS_DELETE (class_type) |= 1; break; case OVL_OP_FLAG_ALLOC | OVL_OP_FLAG_VEC: TYPE_HAS_ARRAY_NEW_OPERATOR (class_type) = 1; break; case OVL_OP_FLAG_ALLOC | OVL_OP_FLAG_DELETE | OVL_OP_FLAG_VEC: TYPE_GETS_DELETE (class_type) |= 2; break; default: gcc_unreachable (); } /* [basic.std.dynamic.allocation]/1: A program is ill-formed if an allocation function is declared in a namespace scope other than global scope or declared static in global scope. The same also holds true for deallocation functions. */ if (DECL_NAMESPACE_SCOPE_P (decl)) { if (CP_DECL_CONTEXT (decl) != global_namespace) { error_at (loc, "%qD may not be declared within a namespace", decl); return false; } if (!TREE_PUBLIC (decl)) { error_at (loc, "%qD may not be declared as static", decl); return false; } } if (op_flags & OVL_OP_FLAG_DELETE) { DECL_SET_IS_OPERATOR_DELETE (decl, true); coerce_delete_type (decl, loc); } else { DECL_SET_IS_OPERATOR_NEW (decl, true); TREE_TYPE (decl) = coerce_new_type (TREE_TYPE (decl), loc); } return true; } /* An operator function must either be a non-static member function or have at least one parameter of a class, a reference to a class, an enumeration, or a reference to an enumeration. 13.4.0.6 */ if (! 
methodp || DECL_STATIC_FUNCTION_P (decl)) { if (operator_code == TYPE_EXPR || operator_code == CALL_EXPR || operator_code == COMPONENT_REF || operator_code == ARRAY_REF || operator_code == NOP_EXPR) { error_at (loc, "%qD must be a non-static member function", decl); return false; } if (DECL_STATIC_FUNCTION_P (decl)) { error_at (loc, "%qD must be either a non-static member " "function or a non-member function", decl); return false; } for (tree arg = argtypes; ; arg = TREE_CHAIN (arg)) { if (!arg || arg == void_list_node) { if (complain) error_at(loc, "%qD must have an argument of class or " "enumerated type", decl); return false; } tree type = non_reference (TREE_VALUE (arg)); if (type == error_mark_node) return false; /* MAYBE_CLASS_TYPE_P, rather than CLASS_TYPE_P, is used because these checks are performed even on template functions. */ if (MAYBE_CLASS_TYPE_P (type) || TREE_CODE (type) == ENUMERAL_TYPE) break; } } if (operator_code == CALL_EXPR) /* There are no further restrictions on the arguments to an overloaded "operator ()". */ return true; if (operator_code == COND_EXPR) { /* 13.4.0.3 */ error_at (loc, "ISO C++ prohibits overloading %<operator ?:%>"); return false; } /* Count the number of arguments and check for ellipsis. */ int arity = 0; for (tree arg = argtypes; arg != void_list_node; arg = TREE_CHAIN (arg)) { if (!arg) { /* Variadic. */ error_at (loc, "%qD must not have variable number of arguments", decl); return false; } ++arity; } /* Verify correct number of arguments. */ switch (op_flags) { case OVL_OP_FLAG_AMBIARY: if (arity == 1) { /* We have a unary instance of an ambi-ary op. Remap to the unary one. 
*/ unsigned alt = ovl_op_alternate[ovl_op_mapping [operator_code]]; const ovl_op_info_t *ovl_op = &ovl_op_info[false][alt]; gcc_checking_assert (ovl_op->flags == OVL_OP_FLAG_UNARY); operator_code = ovl_op->tree_code; DECL_OVERLOADED_OPERATOR_CODE_RAW (decl) = ovl_op->ovl_op_code; } else if (arity != 2) { /* This was an ambiguous operator but is invalid. */ error_at (loc, methodp ? G_("%qD must have either zero or one argument") : G_("%qD must have either one or two arguments"), decl); return false; } else if ((operator_code == POSTINCREMENT_EXPR || operator_code == POSTDECREMENT_EXPR) && ! processing_template_decl /* x++ and x--'s second argument must be an int. */ && ! same_type_p (TREE_VALUE (TREE_CHAIN (argtypes)), integer_type_node)) { error_at (loc, methodp ? G_("postfix %qD must have %<int%> as its argument") : G_("postfix %qD must have %<int%> as its second argument"), decl); return false; } break; case OVL_OP_FLAG_UNARY: if (arity != 1) { error_at (loc, methodp ? G_("%qD must have no arguments") : G_("%qD must have exactly one argument"), decl); return false; } break; case OVL_OP_FLAG_BINARY: if (arity != 2) { error_at (loc, methodp ? G_("%qD must have exactly one argument") : G_("%qD must have exactly two arguments"), decl); return false; } break; default: gcc_unreachable (); } /* There can be no default arguments. */ for (tree arg = argtypes; arg != void_list_node; arg = TREE_CHAIN (arg)) if (TREE_PURPOSE (arg)) { TREE_PURPOSE (arg) = NULL_TREE; error_at (loc, "%qD cannot have default arguments", decl); return false; } /* At this point the declaration is well-formed. It may not be sensible though. */ /* Check member function warnings only on the in-class declaration. There's no point warning on an out-of-class definition. */ if (class_type && class_type != current_class_type) return true; /* Warn about conversion operators that will never be used. */ if (IDENTIFIER_CONV_OP_P (name) && ! 
DECL_TEMPLATE_INFO (decl) && warn_class_conversion) { tree t = TREE_TYPE (name); int ref = TYPE_REF_P (t); if (ref) t = TYPE_MAIN_VARIANT (TREE_TYPE (t)); if (VOID_TYPE_P (t)) warning_at (loc, OPT_Wclass_conversion, "converting %qT to %<void%> " "will never use a type conversion operator", class_type); else if (class_type) { if (same_type_ignoring_top_level_qualifiers_p (t, class_type)) warning_at (loc, OPT_Wclass_conversion, ref ? G_("converting %qT to a reference to the same type " "will never use a type conversion operator") : G_("converting %qT to the same type " "will never use a type conversion operator"), class_type); /* Don't force t to be complete here. */ else if (MAYBE_CLASS_TYPE_P (t) && COMPLETE_TYPE_P (t) && DERIVED_FROM_P (t, class_type)) warning_at (loc, OPT_Wclass_conversion, ref ? G_("converting %qT to a reference to a base class " "%qT will never use a type conversion operator") : G_("converting %qT to a base class %qT " "will never use a type conversion operator"), class_type, t); } } if (!warn_ecpp) return true; /* Effective C++ rules below. */ /* More Effective C++ rule 7. */ if (operator_code == TRUTH_ANDIF_EXPR || operator_code == TRUTH_ORIF_EXPR || operator_code == COMPOUND_EXPR) warning_at (loc, OPT_Weffc__, "user-defined %qD always evaluates both arguments", decl); /* More Effective C++ rule 6. 
*/ if (operator_code == POSTINCREMENT_EXPR || operator_code == POSTDECREMENT_EXPR || operator_code == PREINCREMENT_EXPR || operator_code == PREDECREMENT_EXPR) { tree arg = TREE_VALUE (argtypes); tree ret = TREE_TYPE (TREE_TYPE (decl)); if (methodp || TYPE_REF_P (arg)) arg = TREE_TYPE (arg); arg = TYPE_MAIN_VARIANT (arg); if (operator_code == PREINCREMENT_EXPR || operator_code == PREDECREMENT_EXPR) { if (!TYPE_REF_P (ret) || !same_type_p (TYPE_MAIN_VARIANT (TREE_TYPE (ret)), arg)) warning_at (loc, OPT_Weffc__, "prefix %qD should return %qT", decl, build_reference_type (arg)); } else { if (!same_type_p (TYPE_MAIN_VARIANT (ret), arg)) warning_at (loc, OPT_Weffc__, "postfix %qD should return %qT", decl, arg); } } /* Effective C++ rule 23. */ if (!DECL_ASSIGNMENT_OPERATOR_P (decl) && (operator_code == PLUS_EXPR || operator_code == MINUS_EXPR || operator_code == TRUNC_DIV_EXPR || operator_code == MULT_EXPR || operator_code == TRUNC_MOD_EXPR) && TYPE_REF_P (TREE_TYPE (TREE_TYPE (decl)))) warning_at (loc, OPT_Weffc__, "%qD should return by value", decl); return true; } /* Return a string giving the keyword associate with CODE. */ static const char * tag_name (enum tag_types code) { switch (code) { case record_type: return "struct"; case class_type: return "class"; case union_type: return "union"; case enum_type: return "enum"; case typename_type: return "typename"; default: gcc_unreachable (); } } /* Name lookup in an elaborated-type-specifier (after the keyword indicated by TAG_CODE) has found the TYPE_DECL DECL. If the elaborated-type-specifier is invalid, issue a diagnostic and return error_mark_node; otherwise, return the *_TYPE to which it referred. If ALLOW_TEMPLATE_P is true, TYPE may be a class template. */ tree check_elaborated_type_specifier (enum tag_types tag_code, tree decl, bool allow_template_p) { tree type; /* In the case of: struct S { struct S *p; }; name lookup will find the TYPE_DECL for the implicit "S::S" typedef. Adjust for that here. 
*/ if (DECL_SELF_REFERENCE_P (decl)) decl = TYPE_NAME (TREE_TYPE (decl)); type = TREE_TYPE (decl); /* Check TEMPLATE_TYPE_PARM first because DECL_IMPLICIT_TYPEDEF_P is false for this case as well. */ if (TREE_CODE (type) == TEMPLATE_TYPE_PARM) { error ("using template type parameter %qT after %qs", type, tag_name (tag_code)); return error_mark_node; } /* Accept template template parameters. */ else if (allow_template_p && (TREE_CODE (type) == BOUND_TEMPLATE_TEMPLATE_PARM || TREE_CODE (type) == TEMPLATE_TEMPLATE_PARM)) ; /* [dcl.type.elab] If the identifier resolves to a typedef-name or the simple-template-id resolves to an alias template specialization, the elaborated-type-specifier is ill-formed. In other words, the only legitimate declaration to use in the elaborated type specifier is the implicit typedef created when the type is declared. */ else if (!DECL_IMPLICIT_TYPEDEF_P (decl) && !DECL_SELF_REFERENCE_P (decl) && tag_code != typename_type) { if (alias_template_specialization_p (type, nt_opaque)) error ("using alias template specialization %qT after %qs", type, tag_name (tag_code)); else error ("using typedef-name %qD after %qs", decl, tag_name (tag_code)); inform (DECL_SOURCE_LOCATION (decl), "%qD has a previous declaration here", decl); return error_mark_node; } else if (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE && tag_code != enum_type && tag_code != typename_type) { error ("%qT referred to as %qs", type, tag_name (tag_code)); inform (location_of (type), "%qT has a previous declaration here", type); return error_mark_node; } else if (TREE_CODE (type) != ENUMERAL_TYPE && tag_code == enum_type) { error ("%qT referred to as enum", type); inform (location_of (type), "%qT has a previous declaration here", type); return error_mark_node; } else if (!allow_template_p && TREE_CODE (type) == RECORD_TYPE && CLASSTYPE_IS_TEMPLATE (type)) { /* If a class template appears as elaborated type specifier without a template header such as: template 
<class T> class C {}; void f(class C); // No template header here then the required template argument is missing. */ error ("template argument required for %<%s %T%>", tag_name (tag_code), DECL_NAME (CLASSTYPE_TI_TEMPLATE (type))); return error_mark_node; } return type; } /* Lookup NAME of an elaborated type specifier according to SCOPE and issue diagnostics if necessary. Return *_TYPE node upon success, NULL_TREE when the NAME is not found, and ERROR_MARK_NODE for type error. */ static tree lookup_and_check_tag (enum tag_types tag_code, tree name, TAG_how how, bool template_header_p) { tree decl; if (how == TAG_how::GLOBAL) { /* First try ordinary name lookup, ignoring hidden class name injected via friend declaration. */ decl = lookup_name (name, LOOK_want::TYPE); decl = strip_using_decl (decl); /* If that fails, the name will be placed in the smallest non-class, non-function-prototype scope according to 3.3.1/5. We may already have a hidden name declared as friend in this scope. So lookup again but not ignoring hidden names. If we find one, that name will be made visible rather than creating a new tag. */ if (!decl) decl = lookup_elaborated_type (name, TAG_how::INNERMOST_NON_CLASS); } else decl = lookup_elaborated_type (name, how); if (!decl) /* We found nothing. */ return NULL_TREE; if (TREE_CODE (decl) == TREE_LIST) { error ("reference to %qD is ambiguous", name); print_candidates (decl); return error_mark_node; } if (DECL_CLASS_TEMPLATE_P (decl) /* If scope is TAG_how::CURRENT_ONLY we're defining a class, so ignore a template template parameter. */ || (how != TAG_how::CURRENT_ONLY && DECL_TEMPLATE_TEMPLATE_PARM_P (decl))) decl = DECL_TEMPLATE_RESULT (decl); if (TREE_CODE (decl) != TYPE_DECL) /* Found not-a-type. 
*/ return NULL_TREE; /* Look for invalid nested type: class C { class C {}; }; */ if (how == TAG_how::CURRENT_ONLY && DECL_SELF_REFERENCE_P (decl)) { error ("%qD has the same name as the class in which it is " "declared", decl); return error_mark_node; } /* Two cases we need to consider when deciding if a class template is allowed as an elaborated type specifier: 1. It is a self reference to its own class. 2. It comes with a template header. For example: template <class T> class C { class C *c1; // DECL_SELF_REFERENCE_P is true class D; }; template <class U> class C; // template_header_p is true template <class T> class C<T>::D { class C *c2; // DECL_SELF_REFERENCE_P is true }; */ tree t = check_elaborated_type_specifier (tag_code, decl, template_header_p | DECL_SELF_REFERENCE_P (decl)); if (template_header_p && t && CLASS_TYPE_P (t) && (!CLASSTYPE_TEMPLATE_INFO (t) || (!PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (t))))) { error ("%qT is not a template", t); inform (location_of (t), "previous declaration here"); if (TYPE_CLASS_SCOPE_P (t) && CLASSTYPE_TEMPLATE_INFO (TYPE_CONTEXT (t))) inform (input_location, "perhaps you want to explicitly add %<%T::%>", TYPE_CONTEXT (t)); return error_mark_node; } return t; } /* Get the struct, enum or union (TAG_CODE says which) with tag NAME. Define the tag as a forward-reference if it is not defined. If a declaration is given, process it here, and report an error if multiple declarations are not identical. SCOPE is TS_CURRENT when this is also a definition. Only look in the current frame for the name (since C++ allows new names in any scope.) It is TS_WITHIN_ENCLOSING_NON_CLASS if this is a friend declaration. Only look beginning from the current scope outward up till the nearest non-class scope. Otherwise it is TS_GLOBAL. TEMPLATE_HEADER_P is true when this declaration is preceded by a set of template parameters. 
*/ static tree xref_tag_1 (enum tag_types tag_code, tree name, TAG_how how, bool template_header_p) { enum tree_code code; tree context = NULL_TREE; gcc_assert (identifier_p (name)); switch (tag_code) { case record_type: case class_type: code = RECORD_TYPE; break; case union_type: code = UNION_TYPE; break; case enum_type: code = ENUMERAL_TYPE; break; default: gcc_unreachable (); } /* In case of anonymous name, xref_tag is only called to make type node and push name. Name lookup is not required. */ tree t = NULL_TREE; if (!IDENTIFIER_ANON_P (name)) t = lookup_and_check_tag (tag_code, name, how, template_header_p); if (t == error_mark_node) return error_mark_node; if (how != TAG_how::CURRENT_ONLY && t && current_class_type && template_class_depth (current_class_type) && template_header_p) { if (TREE_CODE (t) == TEMPLATE_TEMPLATE_PARM) return t; /* Since HOW is not TAG_how::CURRENT_ONLY, we are not looking at a definition of this tag. Since, in addition, we are currently processing a (member) template declaration of a template class, we must be very careful; consider: template <class X> struct S1 template <class U> struct S2 { template <class V> friend struct S1; }; Here, the S2::S1 declaration should not be confused with the outer declaration. In particular, the inner version should have a template parameter of level 2, not level 1. On the other hand, when presented with: template <class T> struct S1 { template <class U> struct S2 {}; template <class U> friend struct S2; }; the friend must find S1::S2 eventually. We accomplish this by making sure that the new type we create to represent this declaration has the right TYPE_CONTEXT. */ context = TYPE_CONTEXT (t); t = NULL_TREE; } if (! t) { /* If no such tag is yet defined, create a forward-reference node and record it as the "definition". When a real declaration of this type is found, the forward-reference will be altered into a real type. 
*/ if (code == ENUMERAL_TYPE) { error ("use of enum %q#D without previous declaration", name); return error_mark_node; } t = make_class_type (code); TYPE_CONTEXT (t) = context; if (IDENTIFIER_LAMBDA_P (name)) /* Mark it as a lambda type right now. Our caller will correct the value. */ CLASSTYPE_LAMBDA_EXPR (t) = error_mark_node; t = pushtag (name, t, how); } else { if (template_header_p && MAYBE_CLASS_TYPE_P (t)) { /* Check that we aren't trying to overload a class with different constraints. */ tree constr = NULL_TREE; if (current_template_parms) { tree reqs = TEMPLATE_PARMS_CONSTRAINTS (current_template_parms); constr = build_constraints (reqs, NULL_TREE); } if (!redeclare_class_template (t, current_template_parms, constr)) return error_mark_node; } else if (!processing_template_decl && CLASS_TYPE_P (t) && CLASSTYPE_IS_TEMPLATE (t)) { error ("redeclaration of %qT as a non-template", t); inform (location_of (t), "previous declaration %qD", t); return error_mark_node; } } return t; } /* Wrapper for xref_tag_1. */ tree xref_tag (enum tag_types tag_code, tree name, TAG_how how, bool template_header_p) { bool subtime = timevar_cond_start (TV_NAME_LOOKUP); tree ret = xref_tag_1 (tag_code, name, how, template_header_p); timevar_cond_stop (TV_NAME_LOOKUP, subtime); return ret; } /* Create the binfo hierarchy for REF with (possibly NULL) base list BASE_LIST. For each element on BASE_LIST the TREE_PURPOSE is an access_* node, and the TREE_VALUE is the type of the base-class. Non-NULL TREE_TYPE indicates virtual inheritance. */ void xref_basetypes (tree ref, tree base_list) { tree *basep; tree binfo, base_binfo; unsigned max_vbases = 0; /* Maximum direct & indirect virtual bases. */ unsigned max_bases = 0; /* Maximum direct bases. */ unsigned max_dvbases = 0; /* Maximum direct virtual bases. */ int i; tree default_access; tree igo_prev; /* Track Inheritance Graph Order. 
*/ if (ref == error_mark_node) return; /* The base of a derived class is private by default, all others are public. */ default_access = (TREE_CODE (ref) == RECORD_TYPE && CLASSTYPE_DECLARED_CLASS (ref) ? access_private_node : access_public_node); /* First, make sure that any templates in base-classes are instantiated. This ensures that if we call ourselves recursively we do not get confused about which classes are marked and which are not. */ basep = &base_list; while (*basep) { tree basetype = TREE_VALUE (*basep); /* The dependent_type_p call below should really be dependent_scope_p so that we give a hard error about using an incomplete type as a base, but we allow it with a pedwarn for backward compatibility. */ if (processing_template_decl && CLASS_TYPE_P (basetype) && TYPE_BEING_DEFINED (basetype)) cxx_incomplete_type_diagnostic (NULL_TREE, basetype, DK_PEDWARN); if (!dependent_type_p (basetype) && !complete_type_or_else (basetype, NULL)) /* An incomplete type. Remove it from the list. */ *basep = TREE_CHAIN (*basep); else { max_bases++; if (TREE_TYPE (*basep)) max_dvbases++; if (CLASS_TYPE_P (basetype)) max_vbases += vec_safe_length (CLASSTYPE_VBASECLASSES (basetype)); basep = &TREE_CHAIN (*basep); } } max_vbases += max_dvbases; TYPE_MARKED_P (ref) = 1; /* The binfo slot should be empty, unless this is an (ill-formed) redefinition. */ gcc_assert (!TYPE_BINFO (ref) || TYPE_SIZE (ref)); gcc_assert (TYPE_MAIN_VARIANT (ref) == ref); binfo = make_tree_binfo (max_bases); TYPE_BINFO (ref) = binfo; BINFO_OFFSET (binfo) = size_zero_node; BINFO_TYPE (binfo) = ref; /* Apply base-class info set up to the variants of this type. */ fixup_type_variants (ref); if (max_bases) { vec_alloc (BINFO_BASE_ACCESSES (binfo), max_bases); /* A C++98 POD cannot have base classes. 
*/ CLASSTYPE_NON_LAYOUT_POD_P (ref) = true; if (TREE_CODE (ref) == UNION_TYPE) { error ("derived union %qT invalid", ref); return; } } if (max_bases > 1) warning (OPT_Wmultiple_inheritance, "%qT defined with multiple direct bases", ref); if (max_vbases) { /* An aggregate can't have virtual base classes. */ CLASSTYPE_NON_AGGREGATE (ref) = true; vec_alloc (CLASSTYPE_VBASECLASSES (ref), max_vbases); if (max_dvbases) warning (OPT_Wvirtual_inheritance, "%qT defined with direct virtual base", ref); } for (igo_prev = binfo; base_list; base_list = TREE_CHAIN (base_list)) { tree access = TREE_PURPOSE (base_list); int via_virtual = TREE_TYPE (base_list) != NULL_TREE; tree basetype = TREE_VALUE (base_list); if (access == access_default_node) access = default_access; /* Before C++17, an aggregate cannot have base classes. In C++17, an aggregate can't have virtual, private, or protected base classes. */ if (cxx_dialect < cxx17 || access != access_public_node || via_virtual) CLASSTYPE_NON_AGGREGATE (ref) = true; if (PACK_EXPANSION_P (basetype)) basetype = PACK_EXPANSION_PATTERN (basetype); if (TREE_CODE (basetype) == TYPE_DECL) basetype = TREE_TYPE (basetype); if (!MAYBE_CLASS_TYPE_P (basetype) || TREE_CODE (basetype) == UNION_TYPE) { error ("base type %qT fails to be a struct or class type", basetype); goto dropped_base; } base_binfo = NULL_TREE; if (CLASS_TYPE_P (basetype) && !dependent_scope_p (basetype)) { base_binfo = TYPE_BINFO (basetype); /* The original basetype could have been a typedef'd type. */ basetype = BINFO_TYPE (base_binfo); /* Inherit flags from the base. 
*/ TYPE_HAS_NEW_OPERATOR (ref) |= TYPE_HAS_NEW_OPERATOR (basetype); TYPE_HAS_ARRAY_NEW_OPERATOR (ref) |= TYPE_HAS_ARRAY_NEW_OPERATOR (basetype); TYPE_GETS_DELETE (ref) |= TYPE_GETS_DELETE (basetype); TYPE_HAS_CONVERSION (ref) |= TYPE_HAS_CONVERSION (basetype); CLASSTYPE_DIAMOND_SHAPED_P (ref) |= CLASSTYPE_DIAMOND_SHAPED_P (basetype); CLASSTYPE_REPEATED_BASE_P (ref) |= CLASSTYPE_REPEATED_BASE_P (basetype); } /* We must do this test after we've seen through a typedef type. */ if (TYPE_MARKED_P (basetype)) { if (basetype == ref) error ("recursive type %qT undefined", basetype); else error ("duplicate base type %qT invalid", basetype); goto dropped_base; } if (PACK_EXPANSION_P (TREE_VALUE (base_list))) /* Regenerate the pack expansion for the bases. */ basetype = make_pack_expansion (basetype); TYPE_MARKED_P (basetype) = 1; base_binfo = copy_binfo (base_binfo, basetype, ref, &igo_prev, via_virtual); if (!BINFO_INHERITANCE_CHAIN (base_binfo)) BINFO_INHERITANCE_CHAIN (base_binfo) = binfo; BINFO_BASE_APPEND (binfo, base_binfo); BINFO_BASE_ACCESS_APPEND (binfo, access); continue; dropped_base: /* Update max_vbases to reflect the reality that we are dropping this base: if it reaches zero we want to undo the vec_alloc above to avoid inconsistencies during error-recovery: eg, in build_special_member_call, CLASSTYPE_VBASECLASSES non null and vtt null (c++/27952). */ if (via_virtual) max_vbases--; if (CLASS_TYPE_P (basetype)) max_vbases -= vec_safe_length (CLASSTYPE_VBASECLASSES (basetype)); } if (CLASSTYPE_VBASECLASSES (ref) && max_vbases == 0) vec_free (CLASSTYPE_VBASECLASSES (ref)); if (vec_safe_length (CLASSTYPE_VBASECLASSES (ref)) < max_vbases) /* If we didn't get max_vbases vbases, we must have shared at least one of them, and are therefore diamond shaped. */ CLASSTYPE_DIAMOND_SHAPED_P (ref) = 1; /* Unmark all the types. 
*/ for (i = 0; BINFO_BASE_ITERATE (binfo, i, base_binfo); i++) TYPE_MARKED_P (BINFO_TYPE (base_binfo)) = 0; TYPE_MARKED_P (ref) = 0; /* Now see if we have a repeated base type. */ if (!CLASSTYPE_REPEATED_BASE_P (ref)) { for (base_binfo = binfo; base_binfo; base_binfo = TREE_CHAIN (base_binfo)) { if (TYPE_MARKED_P (BINFO_TYPE (base_binfo))) { CLASSTYPE_REPEATED_BASE_P (ref) = 1; break; } TYPE_MARKED_P (BINFO_TYPE (base_binfo)) = 1; } for (base_binfo = binfo; base_binfo; base_binfo = TREE_CHAIN (base_binfo)) if (TYPE_MARKED_P (BINFO_TYPE (base_binfo))) TYPE_MARKED_P (BINFO_TYPE (base_binfo)) = 0; else break; } } /* Copies the enum-related properties from type SRC to type DST. Used with the underlying type of an enum and the enum itself. */ static void copy_type_enum (tree dst, tree src) { tree t; for (t = dst; t; t = TYPE_NEXT_VARIANT (t)) { TYPE_MIN_VALUE (t) = TYPE_MIN_VALUE (src); TYPE_MAX_VALUE (t) = TYPE_MAX_VALUE (src); TYPE_SIZE (t) = TYPE_SIZE (src); TYPE_SIZE_UNIT (t) = TYPE_SIZE_UNIT (src); SET_TYPE_MODE (dst, TYPE_MODE (src)); TYPE_PRECISION (t) = TYPE_PRECISION (src); unsigned valign = TYPE_ALIGN (src); if (TYPE_USER_ALIGN (t)) valign = MAX (valign, TYPE_ALIGN (t)); else TYPE_USER_ALIGN (t) = TYPE_USER_ALIGN (src); SET_TYPE_ALIGN (t, valign); TYPE_UNSIGNED (t) = TYPE_UNSIGNED (src); } } /* Begin compiling the definition of an enumeration type. NAME is its name, if ENUMTYPE is not NULL_TREE then the type has alredy been found. UNDERLYING_TYPE is the type that will be used as the storage for the enumeration type. This should be NULL_TREE if no storage type was specified. ATTRIBUTES are any attributes specified after the enum-key. SCOPED_ENUM_P is true if this is a scoped enumeration type. if IS_NEW is not NULL, gets TRUE iff a new type is created. Returns the type object, as yet incomplete. Also records info about it so that build_enumerator may be used to declare the individual values as they are read. 
*/

tree
start_enum (tree name, tree enumtype, tree underlying_type,
	    tree attributes, bool scoped_enum_p, bool *is_new)
{
  tree prevtype = NULL_TREE;
  gcc_assert (identifier_p (name));

  if (is_new)
    *is_new = false;
  /* [C++0x dcl.enum]p5:

    If not explicitly specified, the underlying type of a scoped
    enumeration type is int.  */
  if (!underlying_type && scoped_enum_p)
    underlying_type = integer_type_node;

  if (underlying_type)
    underlying_type = cv_unqualified (underlying_type);

  /* If this is the real definition for a previous forward reference,
     fill in the contents in the same object that used to be the
     forward reference.  */
  if (!enumtype)
    enumtype = lookup_and_check_tag (enum_type, name,
				     /*tag_scope=*/TAG_how::CURRENT_ONLY,
				     /*template_header_p=*/false);

  /* In case of a template_decl, the only check that should be deferred
     to instantiation time is the comparison of underlying types.  */
  if (enumtype && TREE_CODE (enumtype) == ENUMERAL_TYPE)
    {
      /* A redeclaration must agree on scoped-ness, fixed-ness and the
	 underlying type itself with the earlier declaration.  */
      if (scoped_enum_p != SCOPED_ENUM_P (enumtype))
	{
	  error_at (input_location, "scoped/unscoped mismatch "
		    "in enum %q#T", enumtype);
	  inform (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (enumtype)),
		  "previous definition here");
	  enumtype = error_mark_node;
	}
      else if (ENUM_FIXED_UNDERLYING_TYPE_P (enumtype) != !! underlying_type)
	{
	  error_at (input_location, "underlying type mismatch "
		    "in enum %q#T", enumtype);
	  inform (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (enumtype)),
		  "previous definition here");
	  enumtype = error_mark_node;
	}
      else if (underlying_type && ENUM_UNDERLYING_TYPE (enumtype)
	       && !same_type_p (underlying_type,
				ENUM_UNDERLYING_TYPE (enumtype)))
	{
	  error_at (input_location, "different underlying type "
		    "in enum %q#T", enumtype);
	  inform (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (enumtype)),
		  "previous definition here");
	  /* Keep the previously recorded underlying type.  */
	  underlying_type = NULL_TREE;
	}
    }

  if (!enumtype || TREE_CODE (enumtype) != ENUMERAL_TYPE
      || processing_template_decl)
    {
      /* In case of error, make a dummy enum to allow parsing to
	 continue.  */
      if (enumtype == error_mark_node)
	{
	  name = make_anon_name ();
	  enumtype = NULL_TREE;
	}

      /* enumtype may be an ENUMERAL_TYPE if this is a redefinition
	 of an opaque enum, or an opaque enum of an already defined
	 enumeration (C++11).
	 In any other case, it'll be NULL_TREE. */
      if (!enumtype)
	{
	  if (is_new)
	    *is_new = true;
	}
      prevtype = enumtype;

      /* Do not push the decl more than once.  */
      if (!enumtype
	  || TREE_CODE (enumtype) != ENUMERAL_TYPE)
	{
	  enumtype = cxx_make_type (ENUMERAL_TYPE);
	  enumtype = pushtag (name, enumtype);

	  /* std::byte aliases anything.  */
	  if (enumtype != error_mark_node
	      && TYPE_CONTEXT (enumtype) == std_node
	      && !strcmp ("byte", TYPE_NAME_STRING (enumtype)))
	    TYPE_ALIAS_SET (enumtype) = 0;
	}
      else
	  enumtype = xref_tag (enum_type, name);

      if (enumtype == error_mark_node)
	return error_mark_node;

      /* The enum is considered opaque until the opening '{' of the
	 enumerator list.  */
      SET_OPAQUE_ENUM_P (enumtype, true);
      ENUM_FIXED_UNDERLYING_TYPE_P (enumtype) = !! underlying_type;
    }

  SET_SCOPED_ENUM_P (enumtype, scoped_enum_p);

  cplus_decl_attributes (&enumtype, attributes, (int)ATTR_FLAG_TYPE_IN_PLACE);

  if (underlying_type)
    {
      if (ENUM_UNDERLYING_TYPE (enumtype))
	/* We already checked that it matches, don't change it to a different
	   typedef variant.  */;
      else if (CP_INTEGRAL_TYPE_P (underlying_type))
	{
	  /* Fixed underlying type: the enum takes its layout now.  */
	  copy_type_enum (enumtype, underlying_type);
	  ENUM_UNDERLYING_TYPE (enumtype) = underlying_type;
	}
      else if (dependent_type_p (underlying_type))
	ENUM_UNDERLYING_TYPE (enumtype) = underlying_type;
      else
	error ("underlying type %qT of %qT must be an integral type",
	       underlying_type, enumtype);
    }

  /* If into a template class, the returned enum is always the first
     declaration (opaque or not) seen. This way all the references to
     this type will be to the same declaration. The following ones are
     used only to check for definition errors.  */
  if (prevtype && processing_template_decl)
    return prevtype;
  else
    return enumtype;
}

/* After processing and defining all the values of an enumeration type,
   install their decls in the enumeration type.  ENUMTYPE is the type
   object.  */

void
finish_enum_value_list (tree enumtype)
{
  tree values;
  tree underlying_type;
  tree decl;
  tree value;
  tree minnode, maxnode;
  tree t;

  bool fixed_underlying_type_p
    = ENUM_UNDERLYING_TYPE (enumtype) != NULL_TREE;

  /* We built up the VALUES in reverse order.  */
  TYPE_VALUES (enumtype) = nreverse (TYPE_VALUES (enumtype));

  /* For an enum defined in a template, just set the type of the values;
     all further processing is postponed until the template is
     instantiated.  We need to set the type so that tsubst of a
     CONST_DECL works.  */
  if (processing_template_decl)
    {
      for (values = TYPE_VALUES (enumtype);
	   values;
	   values = TREE_CHAIN (values))
	TREE_TYPE (TREE_VALUE (values)) = enumtype;
      return;
    }

  /* Determine the minimum and maximum values of the enumerators.  */
  if (TYPE_VALUES (enumtype))
    {
      minnode = maxnode = NULL_TREE;

      for (values = TYPE_VALUES (enumtype);
	   values;
	   values = TREE_CHAIN (values))
	{
	  decl = TREE_VALUE (values);

	  /* [dcl.enum]: Following the closing brace of an enum-specifier,
	     each enumerator has the type of its enumeration.  Prior to the
	     closing brace, the type of each enumerator is the type of its
	     initializing value.  */
	  TREE_TYPE (decl) = enumtype;

	  /* Update the minimum and maximum values, if appropriate.  */
	  value = DECL_INITIAL (decl);
	  /* Treat an erroneous enumerator as 0 for error recovery.  */
	  if (value == error_mark_node)
	    value = integer_zero_node;
	  /* Figure out what the minimum and maximum values of the
	     enumerators are.  */
	  if (!minnode)
	    minnode = maxnode = value;
	  else if (tree_int_cst_lt (maxnode, value))
	    maxnode = value;
	  else if (tree_int_cst_lt (value, minnode))
	    minnode = value;
	}
    }
  else
    /* [dcl.enum]

       If the enumerator-list is empty, the underlying type is as if
       the enumeration had a single enumerator with value 0.  */
    minnode = maxnode = integer_zero_node;

  if (!fixed_underlying_type_p)
    {
      /* Compute the number of bits required to represent all values of
	 the enumeration.  We must do this before the type of MINNODE
	 and MAXNODE are transformed, since tree_int_cst_min_precision
	 relies on the TREE_TYPE of the value it is passed.  */
      signop sgn = tree_int_cst_sgn (minnode) >= 0 ? UNSIGNED : SIGNED;
      int lowprec = tree_int_cst_min_precision (minnode, sgn);
      int highprec = tree_int_cst_min_precision (maxnode, sgn);
      int precision = MAX (lowprec, highprec);
      unsigned int itk;
      bool use_short_enum;

      /* Determine the underlying type of the enumeration.

         [dcl.enum]

         The underlying type of an enumeration is an integral type that
         can represent all the enumerator values defined in the
         enumeration.  It is implementation-defined which integral type is
         used as the underlying type for an enumeration except that the
         underlying type shall not be larger than int unless the value of
         an enumerator cannot fit in an int or unsigned int.

         We use "int" or an "unsigned int" as the underlying type, even if
         a smaller integral type would work, unless the user has
         explicitly requested that we use the smallest possible type.  The
         user can request that for all enumerations with a command line
         flag, or for just one enumeration with an attribute.  */

      use_short_enum = flag_short_enums
	|| lookup_attribute ("packed", TYPE_ATTRIBUTES (enumtype));

      /* If the precision of the type was specified with an attribute and it
	 was too small, give an error.  Otherwise, use it.  */
      if (TYPE_PRECISION (enumtype))
	{
	  if (precision > TYPE_PRECISION (enumtype))
	    error ("specified mode too small for enumerated values");
	  else
	    {
	      use_short_enum = true;
	      precision = TYPE_PRECISION (enumtype);
	    }
	}

      /* Scan the standard integer types, narrowest first, for one wide
	 enough and of the right signedness.  */
      for (itk = (use_short_enum ? itk_char : itk_int);
	   itk != itk_none;
	   itk++)
	{
	  underlying_type = integer_types[itk];
	  if (underlying_type != NULL_TREE
	      && TYPE_PRECISION (underlying_type) >= precision
	      && TYPE_SIGN (underlying_type) == sgn)
	    break;
	}
      if (itk == itk_none)
	{
	  /* DR 377

	     If no integral type can represent all the enumerator values,
	     the enumeration is ill-formed.  */
	  error ("no integral type can represent all of the enumerator values "
		 "for %qT", enumtype);
	  precision = TYPE_PRECISION (long_long_integer_type_node);
	  underlying_type = integer_types[itk_unsigned_long_long];
	}

      /* [dcl.enum]

	 The value of sizeof() applied to an enumeration type, an object
	 of an enumeration type, or an enumerator, is the value of sizeof()
	 applied to the underlying type.  */
      copy_type_enum (enumtype, underlying_type);

      /* Compute the minimum and maximum values for the type.

	 [dcl.enum]

	 For an enumeration where emin is the smallest enumerator and emax
	 is the largest, the values of the enumeration are the values of the
	 underlying type in the range bmin to bmax, where bmin and bmax are,
	 respectively, the smallest and largest values of the smallest bit-
	 field that can store emin and emax.  */

      /* The middle-end currently assumes that types with TYPE_PRECISION
	 narrower than their underlying type are suitably zero or sign
	 extended to fill their mode.  Similarly, it assumes that the front
	 end assures that a value of a particular type must be within
	 TYPE_MIN_VALUE and TYPE_MAX_VALUE.

	 We used to set these fields based on bmin and bmax, but that led
	 to invalid assumptions like optimizing away bounds checking.  So
	 now we just set the TYPE_PRECISION, TYPE_MIN_VALUE, and
	 TYPE_MAX_VALUE to the values for the mode above and only restrict
	 the ENUM_UNDERLYING_TYPE for the benefit of diagnostics.  */
      ENUM_UNDERLYING_TYPE (enumtype)
	= build_distinct_type_copy (underlying_type);
      TYPE_PRECISION (ENUM_UNDERLYING_TYPE (enumtype)) = precision;
      set_min_and_max_values_for_integral_type
        (ENUM_UNDERLYING_TYPE (enumtype), precision, sgn);

      /* If -fstrict-enums, still constrain TYPE_MIN/MAX_VALUE.  */
      if (flag_strict_enums)
	set_min_and_max_values_for_integral_type (enumtype, precision, sgn);
    }
  else
    underlying_type = ENUM_UNDERLYING_TYPE (enumtype);

  /* Convert each of the enumerators to the type of the underlying type of
     the enumeration.  */
  for (values = TYPE_VALUES (enumtype); values; values = TREE_CHAIN (values))
    {
      decl = TREE_VALUE (values);
      iloc_sentinel ils (DECL_SOURCE_LOCATION (decl));
      if (fixed_underlying_type_p)
        /* If the enumeration type has a fixed underlying type, we
           already checked all of the enumerator values.  */
        value = DECL_INITIAL (decl);
      else
        value = perform_implicit_conversion (underlying_type,
                                             DECL_INITIAL (decl),
                                             tf_warning_or_error);
      /* Do not clobber shared ints.  */
      if (value != error_mark_node)
	{
	  value = copy_node (value);

	  TREE_TYPE (value) = enumtype;
	}
      DECL_INITIAL (decl) = value;
    }

  /* Fix up all variant types of this enum type.  */
  for (t = TYPE_MAIN_VARIANT (enumtype); t; t = TYPE_NEXT_VARIANT (t))
    TYPE_VALUES (t) = TYPE_VALUES (enumtype);

  if (at_class_scope_p ()
      && COMPLETE_TYPE_P (current_class_type)
      && UNSCOPED_ENUM_P (enumtype))
    {
      insert_late_enum_def_bindings (current_class_type, enumtype);
      /* TYPE_FIELDS needs fixup.  */
      fixup_type_variants (current_class_type);
    }

  /* Finish debugging output for this type.  */
  rest_of_type_compilation (enumtype, namespace_bindings_p ());

  /* Each enumerator now has the type of its enumeration.  Clear the
     cache so that this change in types doesn't confuse us later on.  */
  clear_cv_and_fold_caches ();
}

/* Finishes the enum type.  This is called only the first time an
   enumeration is seen, be it opaque or ordinary.
   ENUMTYPE is the type object.
*/

void
finish_enum (tree enumtype)
{
  if (processing_template_decl)
    {
      /* In a template, just record the definition statement; real
	 processing happens at instantiation time.  */
      if (at_function_scope_p ())
	add_stmt (build_min (TAG_DEFN, enumtype));
      return;
    }

  /* If this is a forward declaration, there should not be any variants,
     though we can get a variant in the middle of an enum-specifier with
     wacky code like 'enum E { e = sizeof(const E*) };'  */
  gcc_assert (enumtype == TYPE_MAIN_VARIANT (enumtype)
	      && (TYPE_VALUES (enumtype)
		  || !TYPE_NEXT_VARIANT (enumtype)));
}

/* Build and install a CONST_DECL for an enumeration constant of the
   enumeration type ENUMTYPE whose NAME and VALUE (if any) are provided.
   Apply ATTRIBUTES if available.  LOC is the location of NAME.
   Assignment of sequential values by default is handled here.  */

void
build_enumerator (tree name, tree value, tree enumtype, tree attributes,
		  location_t loc)
{
  tree decl;
  tree context;
  tree type;

  /* scalar_constant_value will pull out this expression, so make sure
     it's folded as appropriate.  */
  if (processing_template_decl)
    value = fold_non_dependent_expr (value);

  /* If the VALUE was erroneous, pretend it wasn't there; that will
     result in the enum being assigned the next value in sequence.  */
  if (value == error_mark_node)
    value = NULL_TREE;

  /* Remove no-op casts from the value.  */
  if (value)
    STRIP_TYPE_NOPS (value);

  if (! processing_template_decl)
    {
      /* Validate and default VALUE.  */
      if (value != NULL_TREE)
	{
	  if (!ENUM_UNDERLYING_TYPE (enumtype))
	    {
	      /* No fixed underlying type: accept any integral or
		 (unscoped) enumeration initializer.  */
	      tree tmp_value
		= build_expr_type_conversion (WANT_INT | WANT_ENUM,
					      value, true);
	      if (tmp_value)
		value = tmp_value;
	    }
	  else if (! INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P
		   (TREE_TYPE (value)))
	    /* Fixed underlying type: the initializer must convert to it
	       without narrowing.  */
	    value = perform_implicit_conversion_flags
	      (ENUM_UNDERLYING_TYPE (enumtype), value, tf_warning_or_error,
	       LOOKUP_IMPLICIT | LOOKUP_NO_NARROWING);

	  if (value == error_mark_node)
	    value = NULL_TREE;

	  if (value != NULL_TREE)
	    {
	      if (! INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P
		  (TREE_TYPE (value)))
		{
		  error_at (cp_expr_loc_or_input_loc (value),
			    "enumerator value for %qD must have integral or "
			    "unscoped enumeration type", name);
		  value = NULL_TREE;
		}
	      else
		{
		  value = cxx_constant_value (value);

		  if (TREE_CODE (value) != INTEGER_CST)
		    {
		      error ("enumerator value for %qD is not an integer "
			     "constant", name);
		      value = NULL_TREE;
		    }
		}
	    }
	}

      /* Default based on previous value.  */
      if (value == NULL_TREE)
	{
	  if (TYPE_VALUES (enumtype))
	    {
	      tree prev_value;

	      /* C++03 7.2/4: If no initializer is specified for the first
		 enumerator, the type is an unspecified integral
		 type. Otherwise the type is the same as the type of the
		 initializing value of the preceding enumerator unless the
		 incremented value is not representable in that type, in
		 which case the type is an unspecified integral type
		 sufficient to contain the incremented value.  */
	      /* TYPE_VALUES is still in reverse order here, so its head
		 is the most recently added enumerator.  */
	      prev_value = DECL_INITIAL (TREE_VALUE (TYPE_VALUES (enumtype)));
	      if (error_operand_p (prev_value))
		value = error_mark_node;
	      else
		{
		  wi::overflow_type overflowed;
		  tree type = TREE_TYPE (prev_value);
		  signop sgn = TYPE_SIGN (type);
		  /* Compute prev + 1 in arbitrary precision so overflow
		     of the previous type can be detected and widened.  */
		  widest_int wi = wi::add (wi::to_widest (prev_value), 1, sgn,
					   &overflowed);
		  if (!overflowed)
		    {
		      bool pos = !wi::neg_p (wi, sgn);
		      if (!wi::fits_to_tree_p (wi, type))
			{
			  /* Widen to the first standard integer type
			     that can hold the incremented value.  */
			  unsigned int itk;
			  for (itk = itk_int; itk != itk_none; itk++)
			    {
			      type = integer_types[itk];
			      if (type != NULL_TREE
				  && (pos || !TYPE_UNSIGNED (type))
				  && wi::fits_to_tree_p (wi, type))
				break;
			    }
			  if (type && cxx_dialect < cxx11
			      && itk > itk_unsigned_long)
			    pedwarn (input_location, OPT_Wlong_long,
				     pos ? G_("\
incremented enumerator value is too large for %<unsigned long%>")
				     : G_("\
incremented enumerator value is too large for %<long%>"));
			}
		      if (type == NULL_TREE)
			overflowed = wi::OVF_UNKNOWN;
		      else
			value = wide_int_to_tree (type, wi);
		    }

		  if (overflowed)
		    {
		      error ("overflow in enumeration values at %qD",
			     name);
		      value = error_mark_node;
		    }
		}
	    }
	  else
	    /* First enumerator with no initializer: value is 0.  */
	    value = integer_zero_node;
	}

      /* Remove no-op casts from the value.  */
      STRIP_TYPE_NOPS (value);

      /* If the underlying type of the enum is fixed, check whether
         the enumerator value fits in the underlying type.  If it
         does not fit, the program is ill-formed [C++0x dcl.enum].  */
      if (ENUM_UNDERLYING_TYPE (enumtype)
	  && value
	  && TREE_CODE (value) == INTEGER_CST)
	{
	  if (!int_fits_type_p (value, ENUM_UNDERLYING_TYPE (enumtype)))
	    error ("enumerator value %qE is outside the range of underlying "
		   "type %qT", value, ENUM_UNDERLYING_TYPE (enumtype));

	  /* Convert the value to the appropriate type.  */
	  value = fold_convert (ENUM_UNDERLYING_TYPE (enumtype), value);
	}
    }

  /* C++ associates enums with global, function, or class declarations.  */
  context = current_scope ();

  /* Build the actual enumeration constant.  Note that the enumeration
     constants have the underlying type of the enum (if it is fixed)
     or the type of their initializer (if the underlying type of the
     enum is not fixed):

      [ C++0x dcl.enum ]

        If the underlying type is fixed, the type of each enumerator
        prior to the closing brace is the underlying type; if the
        initializing value of an enumerator cannot be represented by
        the underlying type, the program is ill-formed. If the
        underlying type is not fixed, the type of each enumerator is
        the type of its initializing value.

    If the underlying type is not fixed, it will be computed by
    finish_enum and we will reset the type of this enumerator.  Of
    course, if we're processing a template, there may be no value.  */
  type = value ? TREE_TYPE (value) : NULL_TREE;
  decl = build_decl (loc, CONST_DECL, name, type);

  DECL_CONTEXT (decl) = enumtype;
  TREE_CONSTANT (decl) = 1;
  TREE_READONLY (decl) = 1;
  DECL_INITIAL (decl) = value;

  if (attributes)
    cplus_decl_attributes (&decl, attributes, 0);

  if (context && context == current_class_type && !SCOPED_ENUM_P (enumtype))
    {
      /* In something like `struct S { enum E { i = 7 }; };' we put `i'
	 on the TYPE_FIELDS list for `S'.  (That's so that you can say
	 things like `S::i' later.)  */

      /* The enumerator may be getting declared outside of its enclosing
	 class, like so:

	   class S { public: enum E : int; }; enum S::E : int { i = 7; };

	 For which case we need to make sure that the access of `S::i'
	 matches the access of `S::E'.  */
      tree saved_cas = current_access_specifier;
      if (TREE_PRIVATE (TYPE_NAME (enumtype)))
	current_access_specifier = access_private_node;
      else if (TREE_PROTECTED (TYPE_NAME (enumtype)))
	current_access_specifier = access_protected_node;
      else
	current_access_specifier = access_public_node;

      finish_member_declaration (decl);

      current_access_specifier = saved_cas;
    }
  else
    pushdecl (decl);

  /* Add this enumeration constant to the list for this type.  */
  TYPE_VALUES (enumtype) = tree_cons (name, decl, TYPE_VALUES (enumtype));
}

/* Look for an enumerator with the given NAME within the enumeration
   type ENUMTYPE.  This routine is used primarily for qualified name
   lookup into an enumerator in C++0x, e.g.,

     enum class Color { Red, Green, Blue };

     Color color = Color::Red;

   Returns the value corresponding to the enumerator, or
   NULL_TREE if no such enumerator was found.  */
tree
lookup_enumerator (tree enumtype, tree name)
{
  tree e;
  gcc_assert (enumtype && TREE_CODE (enumtype) == ENUMERAL_TYPE);

  /* TYPE_VALUES maps enumerator names (purpose) to CONST_DECLs (value).  */
  e = purpose_member (name, TYPE_VALUES (enumtype));
  return e? TREE_VALUE (e) : NULL_TREE;
}

/* Implement LANG_HOOKS_SIMULATE_ENUM_DECL.
*/

tree
cxx_simulate_enum_decl (location_t loc, const char *name,
			vec<string_int_pair> values)
{
  /* Temporarily pretend we are parsing at LOC so diagnostics and the
     decls built below carry the caller's location.  */
  location_t saved_loc = input_location;
  input_location = loc;

  tree enumtype = start_enum (get_identifier (name), NULL_TREE, NULL_TREE,
			      NULL_TREE, false, NULL);
  if (!OPAQUE_ENUM_P (enumtype))
    {
      error_at (loc, "multiple definition of %q#T", enumtype);
      inform (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (enumtype)),
	      "previous definition here");
      return enumtype;
    }
  SET_OPAQUE_ENUM_P (enumtype, false);
  DECL_SOURCE_LOCATION (TYPE_NAME (enumtype)) = loc;

  /* Create one enumerator per (name, value) pair supplied.  */
  string_int_pair *value;
  unsigned int i;
  FOR_EACH_VEC_ELT (values, i, value)
    build_enumerator (get_identifier (value->first),
		      build_int_cst (integer_type_node, value->second),
		      enumtype, NULL_TREE, loc);

  finish_enum_value_list (enumtype);
  finish_enum (enumtype);

  input_location = saved_loc;
  return enumtype;
}

/* We're defining DECL.  Make sure that its type is OK.
   CURRENT_FUNCTION_PARMS are the function's parameter declarations,
   whose types must be complete at definition time.  */

static void
check_function_type (tree decl, tree current_function_parms)
{
  tree fntype = TREE_TYPE (decl);
  tree return_type = complete_type (TREE_TYPE (fntype));

  /* In a function definition, arg types must be complete.  */
  require_complete_types_for_parms (current_function_parms);

  /* Dependent or auto return types can't be checked yet.  */
  if (dependent_type_p (return_type)
      || type_uses_auto (return_type))
    return;
  if (!COMPLETE_OR_VOID_TYPE_P (return_type))
    {
      tree args = TYPE_ARG_TYPES (fntype);

      error ("return type %q#T is incomplete", return_type);

      /* Make it return void instead, preserving the basetype for
	 methods and the attributes and language qualifiers of the
	 original type, so parsing can continue.  */
      if (TREE_CODE (fntype) == METHOD_TYPE)
	fntype = build_method_type_directly (TREE_TYPE (TREE_VALUE (args)),
					     void_type_node,
					     TREE_CHAIN (args));
      else
	fntype = build_function_type (void_type_node, args);
      fntype = (cp_build_type_attribute_variant
		(fntype, TYPE_ATTRIBUTES (TREE_TYPE (decl))));
      fntype = cxx_copy_lang_qualifiers (fntype, TREE_TYPE (decl));
      TREE_TYPE (decl) = fntype;
    }
  else
    {
      abstract_virtuals_error (decl, TREE_TYPE (fntype));
      maybe_warn_parm_abi (TREE_TYPE (fntype),
			   DECL_SOURCE_LOCATION (decl));
    }
}

/* True iff FN is an implicitly-defined default constructor.  */

static bool
implicit_default_ctor_p (tree fn)
{
  return (DECL_CONSTRUCTOR_P (fn)
	  && !user_provided_p (fn)
	  && sufficient_parms_p (FUNCTION_FIRST_USER_PARMTYPE (fn)));
}

/* Clobber the contents of *this to let the back end know that the object
   storage is dead when we enter the constructor or leave the destructor.
   Returns the expression statement to emit, or void_node if clobbering
   would be pointless or harmful.  */

static tree
build_clobber_this ()
{
  /* Clobbering an empty base is pointless, and harmful if its one byte
     TYPE_SIZE overlays real data.  */
  if (is_empty_class (current_class_type))
    return void_node;

  /* If we have virtual bases, clobber the whole object, but only if we're in
     charge.  If we don't have virtual bases, clobber the as-base type so we
     don't mess with tail padding.  */
  bool vbases = CLASSTYPE_VBASECLASSES (current_class_type);

  tree ctype = current_class_type;
  if (!vbases)
    ctype = CLASSTYPE_AS_BASE (ctype);

  tree clobber = build_clobber (ctype);

  tree thisref = current_class_ref;
  if (ctype != current_class_type)
    {
      /* Re-point *this at the as-base subobject.  */
      thisref = build_nop (build_reference_type (ctype), current_class_ptr);
      thisref = convert_from_reference (thisref);
    }

  tree exprstmt = build2 (MODIFY_EXPR, void_type_node, thisref, clobber);
  if (vbases)
    exprstmt = build_if_in_charge (exprstmt);

  return exprstmt;
}

/* Create the FUNCTION_DECL for a function definition.
DECLSPECS and DECLARATOR are the parts of the declaration; they describe the function's name and the type it returns, but twisted together in a fashion that parallels the syntax of C. FLAGS is a bitwise or of SF_PRE_PARSED (indicating that the DECLARATOR is really the DECL for the function we are about to process and that DECLSPECS should be ignored), SF_INCLASS_INLINE indicating that the function is an inline defined in-class. This function creates a binding context for the function body as well as setting up the FUNCTION_DECL in current_function_decl. For C++, we must first check whether that datum makes any sense. For example, "class A local_a(1,2);" means that variable local_a is an aggregate of type A, which should have a constructor applied to it with the argument list [1, 2]. On entry, DECL_INITIAL (decl1) should be NULL_TREE or error_mark_node, or may be a BLOCK if the function has been defined previously in this translation unit. On exit, DECL_INITIAL (decl1) will be error_mark_node if the function has never been defined, or a BLOCK if the function has been defined somewhere. */ bool start_preparsed_function (tree decl1, tree attrs, int flags) { tree ctype = NULL_TREE; bool doing_friend = false; /* Sanity check. */ gcc_assert (VOID_TYPE_P (TREE_VALUE (void_list_node))); gcc_assert (TREE_CHAIN (void_list_node) == NULL_TREE); tree fntype = TREE_TYPE (decl1); if (TREE_CODE (fntype) == METHOD_TYPE) ctype = TYPE_METHOD_BASETYPE (fntype); else { ctype = DECL_FRIEND_CONTEXT (decl1); if (ctype) doing_friend = true; } if (DECL_DECLARED_INLINE_P (decl1) && lookup_attribute ("noinline", attrs)) warning_at (DECL_SOURCE_LOCATION (decl1), 0, "inline function %qD given attribute %qs", decl1, "noinline"); /* Handle gnu_inline attribute. 
*/ if (GNU_INLINE_P (decl1)) { DECL_EXTERNAL (decl1) = 1; DECL_NOT_REALLY_EXTERN (decl1) = 0; DECL_INTERFACE_KNOWN (decl1) = 1; DECL_DISREGARD_INLINE_LIMITS (decl1) = 1; } if (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (decl1)) /* This is a constructor, we must ensure that any default args introduced by this definition are propagated to the clones now. The clones are used directly in overload resolution. */ adjust_clone_args (decl1); /* Sometimes we don't notice that a function is a static member, and build a METHOD_TYPE for it. Fix that up now. */ gcc_assert (!(ctype != NULL_TREE && DECL_STATIC_FUNCTION_P (decl1) && TREE_CODE (TREE_TYPE (decl1)) == METHOD_TYPE)); /* Set up current_class_type, and enter the scope of the class, if appropriate. */ if (ctype) push_nested_class (ctype); else if (DECL_STATIC_FUNCTION_P (decl1)) push_nested_class (DECL_CONTEXT (decl1)); /* Now that we have entered the scope of the class, we must restore the bindings for any template parameters surrounding DECL1, if it is an inline member template. (Order is important; consider the case where a template parameter has the same name as a field of the class.) It is not until after this point that PROCESSING_TEMPLATE_DECL is guaranteed to be set up correctly. */ if (flags & SF_INCLASS_INLINE) maybe_begin_member_template_processing (decl1); /* Effective C++ rule 15. */ if (warn_ecpp && DECL_ASSIGNMENT_OPERATOR_P (decl1) && DECL_OVERLOADED_OPERATOR_IS (decl1, NOP_EXPR) && VOID_TYPE_P (TREE_TYPE (fntype))) warning (OPT_Weffc__, "%<operator=%> should return a reference to %<*this%>"); /* Make the init_value nonzero so pushdecl knows this is not tentative. error_mark_node is replaced below (in poplevel) with the BLOCK. */ if (!DECL_INITIAL (decl1)) DECL_INITIAL (decl1) = error_mark_node; /* This function exists in static storage. (This does not mean `static' in the C sense!) */ TREE_STATIC (decl1) = 1; /* We must call push_template_decl after current_class_type is set up. 
(If we are processing inline definitions after exiting a class scope, current_class_type will be NULL_TREE until set above by push_nested_class.) */ if (processing_template_decl) { tree newdecl1 = push_template_decl (decl1, doing_friend); if (newdecl1 == error_mark_node) { if (ctype || DECL_STATIC_FUNCTION_P (decl1)) pop_nested_class (); return false; } decl1 = newdecl1; } /* Make sure the parameter and return types are reasonable. When you declare a function, these types can be incomplete, but they must be complete when you define the function. */ check_function_type (decl1, DECL_ARGUMENTS (decl1)); /* Build the return declaration for the function. */ tree restype = TREE_TYPE (fntype); if (DECL_RESULT (decl1) == NULL_TREE) { tree resdecl; resdecl = build_decl (input_location, RESULT_DECL, 0, restype); DECL_ARTIFICIAL (resdecl) = 1; DECL_IGNORED_P (resdecl) = 1; DECL_RESULT (decl1) = resdecl; cp_apply_type_quals_to_decl (cp_type_quals (restype), resdecl); } /* Record the decl so that the function name is defined. If we already have a decl for this name, and it is a FUNCTION_DECL, use the old decl. */ if (!processing_template_decl && !(flags & SF_PRE_PARSED)) { /* A specialization is not used to guide overload resolution. */ if (!DECL_FUNCTION_MEMBER_P (decl1) && !(DECL_USE_TEMPLATE (decl1) && PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (decl1)))) { tree olddecl = pushdecl (decl1); if (olddecl == error_mark_node) /* If something went wrong when registering the declaration, use DECL1; we have to have a FUNCTION_DECL to use when parsing the body of the function. */ ; else { /* Otherwise, OLDDECL is either a previous declaration of the same function or DECL1 itself. */ if (warn_missing_declarations && olddecl == decl1 && !DECL_MAIN_P (decl1) && TREE_PUBLIC (decl1) && !DECL_DECLARED_INLINE_P (decl1)) { tree context; /* Check whether DECL1 is in an anonymous namespace. 
*/ for (context = DECL_CONTEXT (decl1); context; context = DECL_CONTEXT (context)) { if (TREE_CODE (context) == NAMESPACE_DECL && DECL_NAME (context) == NULL_TREE) break; } if (context == NULL) warning_at (DECL_SOURCE_LOCATION (decl1), OPT_Wmissing_declarations, "no previous declaration for %qD", decl1); } decl1 = olddecl; } } else { /* We need to set the DECL_CONTEXT. */ if (!DECL_CONTEXT (decl1) && DECL_TEMPLATE_INFO (decl1)) DECL_CONTEXT (decl1) = DECL_CONTEXT (DECL_TI_TEMPLATE (decl1)); } fntype = TREE_TYPE (decl1); restype = TREE_TYPE (fntype); /* If #pragma weak applies, mark the decl appropriately now. The pragma only applies to global functions. Because determining whether or not the #pragma applies involves computing the mangled name for the declaration, we cannot apply the pragma until after we have merged this declaration with any previous declarations; if the original declaration has a linkage specification, that specification applies to the definition as well, and may affect the mangled name. */ if (DECL_FILE_SCOPE_P (decl1)) maybe_apply_pragma_weak (decl1); } /* We are now in the scope of the function being defined. */ current_function_decl = decl1; /* Save the parm names or decls from this function's declarator where store_parm_decls will find them. */ tree current_function_parms = DECL_ARGUMENTS (decl1); /* Let the user know we're compiling this function. */ announce_function (decl1); gcc_assert (DECL_INITIAL (decl1)); /* This function may already have been parsed, in which case just return; our caller will skip over the body without parsing. */ if (DECL_INITIAL (decl1) != error_mark_node) return true; /* Initialize RTL machinery. We cannot do this until CURRENT_FUNCTION_DECL and DECL_RESULT are set up. We do this even when processing a template; this is how we get CFUN set up, and our per-function variables initialized. FIXME factor out the non-RTL stuff. 
*/ cp_binding_level *bl = current_binding_level; allocate_struct_function (decl1, processing_template_decl); /* Initialize the language data structures. Whenever we start a new function, we destroy temporaries in the usual way. */ cfun->language = ggc_cleared_alloc<language_function> (); current_stmt_tree ()->stmts_are_full_exprs_p = 1; current_binding_level = bl; /* If we are (erroneously) defining a function that we have already defined before, wipe out what we knew before. */ gcc_checking_assert (!DECL_PENDING_INLINE_P (decl1)); FNDECL_USED_AUTO (decl1) = false; DECL_SAVED_AUTO_RETURN_TYPE (decl1) = NULL; if (!processing_template_decl && type_uses_auto (restype)) { FNDECL_USED_AUTO (decl1) = true; DECL_SAVED_AUTO_RETURN_TYPE (decl1) = restype; } /* Start the statement-tree, start the tree now. */ DECL_SAVED_TREE (decl1) = push_stmt_list (); if (ctype && !doing_friend && !DECL_STATIC_FUNCTION_P (decl1)) { /* We know that this was set up by `grokclassfn'. We do not wait until `store_parm_decls', since evil parse errors may never get us to that point. Here we keep the consistency between `current_class_type' and `current_class_ptr'. */ tree t = DECL_ARGUMENTS (decl1); gcc_assert (t != NULL_TREE && TREE_CODE (t) == PARM_DECL); gcc_assert (TYPE_PTR_P (TREE_TYPE (t))); cp_function_chain->x_current_class_ref = cp_build_fold_indirect_ref (t); /* Set this second to avoid shortcut in cp_build_indirect_ref. */ cp_function_chain->x_current_class_ptr = t; /* Constructors and destructors need to know whether they're "in charge" of initializing virtual base classes. 
*/ t = DECL_CHAIN (t); if (DECL_HAS_IN_CHARGE_PARM_P (decl1)) { current_in_charge_parm = t; t = DECL_CHAIN (t); } if (DECL_HAS_VTT_PARM_P (decl1)) { gcc_assert (DECL_NAME (t) == vtt_parm_identifier); current_vtt_parm = t; } } bool honor_interface = (!DECL_TEMPLATE_INSTANTIATION (decl1) /* Implicitly-defined methods (like the destructor for a class in which no destructor is explicitly declared) must not be defined until their definition is needed. So, we ignore interface specifications for compiler-generated functions. */ && !DECL_ARTIFICIAL (decl1)); struct c_fileinfo *finfo = get_fileinfo (LOCATION_FILE (DECL_SOURCE_LOCATION (decl1))); if (processing_template_decl) /* Don't mess with interface flags. */; else if (DECL_INTERFACE_KNOWN (decl1)) { tree ctx = decl_function_context (decl1); if (DECL_NOT_REALLY_EXTERN (decl1)) DECL_EXTERNAL (decl1) = 0; if (ctx != NULL_TREE && vague_linkage_p (ctx)) /* This is a function in a local class in an extern inline or template function. */ comdat_linkage (decl1); } /* If this function belongs to an interface, it is public. If it belongs to someone else's interface, it is also external. This only affects inlines and template instantiations. */ else if (!finfo->interface_unknown && honor_interface) { if (DECL_DECLARED_INLINE_P (decl1) || DECL_TEMPLATE_INSTANTIATION (decl1)) { DECL_EXTERNAL (decl1) = (finfo->interface_only || (DECL_DECLARED_INLINE_P (decl1) && ! flag_implement_inlines && !DECL_VINDEX (decl1))); /* For WIN32 we also want to put these in linkonce sections. */ maybe_make_one_only (decl1); } else DECL_EXTERNAL (decl1) = 0; DECL_INTERFACE_KNOWN (decl1) = 1; /* If this function is in an interface implemented in this file, make sure that the back end knows to emit this function here. 
*/ if (!DECL_EXTERNAL (decl1)) mark_needed (decl1); } else if (finfo->interface_unknown && finfo->interface_only && honor_interface) { /* If MULTIPLE_SYMBOL_SPACES is defined and we saw a #pragma interface, we will have both finfo->interface_unknown and finfo->interface_only set. In that case, we don't want to use the normal heuristics because someone will supply a #pragma implementation elsewhere, and deducing it here would produce a conflict. */ comdat_linkage (decl1); DECL_EXTERNAL (decl1) = 0; DECL_INTERFACE_KNOWN (decl1) = 1; DECL_DEFER_OUTPUT (decl1) = 1; } else { /* This is a definition, not a reference. So clear DECL_EXTERNAL, unless this is a GNU extern inline. */ if (!GNU_INLINE_P (decl1)) DECL_EXTERNAL (decl1) = 0; if ((DECL_DECLARED_INLINE_P (decl1) || DECL_TEMPLATE_INSTANTIATION (decl1)) && ! DECL_INTERFACE_KNOWN (decl1)) DECL_DEFER_OUTPUT (decl1) = 1; else DECL_INTERFACE_KNOWN (decl1) = 1; } /* Determine the ELF visibility attribute for the function. We must not do this before calling "pushdecl", as we must allow "duplicate_decls" to merge any attributes appropriately. We also need to wait until linkage is set. */ if (!DECL_CLONED_FUNCTION_P (decl1)) determine_visibility (decl1); if (!processing_template_decl) maybe_instantiate_noexcept (decl1); begin_scope (sk_function_parms, decl1); ++function_depth; if (DECL_DESTRUCTOR_P (decl1) || (DECL_CONSTRUCTOR_P (decl1) && targetm.cxx.cdtor_returns_this ())) { cdtor_label = create_artificial_label (input_location); LABEL_DECL_CDTOR (cdtor_label) = true; } start_fname_decls (); store_parm_decls (current_function_parms); push_operator_bindings (); if (!processing_template_decl && (flag_lifetime_dse > 1) && DECL_CONSTRUCTOR_P (decl1) && !DECL_CLONED_FUNCTION_P (decl1) /* Clobbering an empty base is harmful if it overlays real data. 
   */
	  && !is_empty_class (current_class_type)
	  /* We can't clobber safely for an implicitly-defined default
	     constructor because part of the initialization might happen
	     before we enter the constructor, via AGGR_INIT_ZERO_FIRST
	     (c++/68006).  */
	  && !implicit_default_ctor_p (decl1))
    finish_expr_stmt (build_clobber_this ());

  /* Under -fsanitize=vptr, arrange for vtable pointers to be set up at
     the start of a user-written constructor (not for clones or the
     implicit default ctor, which are handled elsewhere).  */
  if (!processing_template_decl
      && DECL_CONSTRUCTOR_P (decl1)
      && sanitize_flags_p (SANITIZE_VPTR)
      && !DECL_CLONED_FUNCTION_P (decl1)
      && !implicit_default_ctor_p (decl1))
    cp_ubsan_maybe_initialize_vtbl_ptrs (current_class_ptr);

  if (!DECL_OMP_DECLARE_REDUCTION_P (decl1))
    start_lambda_scope (decl1);

  return true;
}

/* Like start_preparsed_function, except that instead of a
   FUNCTION_DECL, this function takes DECLSPECS and DECLARATOR.

   Returns true on success.  If the DECLARATOR is not suitable
   for a function, we return false, which tells the parser to
   skip the entire function.  */

bool
start_function (cp_decl_specifier_seq *declspecs,
		const cp_declarator *declarator,
		tree attrs)
{
  tree decl1;

  /* Build the FUNCTION_DECL from the parsed pieces; on failure
     grokdeclarator returns error_mark_node.  */
  decl1 = grokdeclarator (declarator, declspecs, FUNCDEF, 1, &attrs);
  invoke_plugin_callbacks (PLUGIN_START_PARSE_FUNCTION, decl1);
  if (decl1 == error_mark_node)
    return false;

  if (DECL_MAIN_P (decl1))
    /* main must return int.  grokfndecl should have corrected it
       (and issued a diagnostic) if the user got it wrong.  */
    gcc_assert (same_type_p (TREE_TYPE (TREE_TYPE (decl1)),
			     integer_type_node));

  return start_preparsed_function (decl1, attrs, /*flags=*/SF_DEFAULT);
}

/* Returns true iff an EH_SPEC_BLOCK should be created in the body of
   FN.  */

static bool
use_eh_spec_block (tree fn)
{
  return (flag_exceptions && flag_enforce_eh_specs
	  && !processing_template_decl
	  /* We insert the EH_SPEC_BLOCK only in the original
	     function; then, it is copied automatically to the
	     clones.  */
	  && !DECL_CLONED_FUNCTION_P (fn)
	  /* Implicitly-generated constructors and destructors have
	     exception specifications.
	     However, those specifications are the union of the possible
	     exceptions specified by the constructors/destructors for bases
	     and members, so no unallowed exception will ever reach this
	     function.  By not creating the EH_SPEC_BLOCK we save a little
	     memory, and we avoid spurious warnings about unreachable
	     code.  */
	  && !DECL_DEFAULTED_FN (fn)
	  && !type_throw_all_p (TREE_TYPE (fn)));
}

/* Helper function to push ARGS into the current lexical scope.  DECL
   is the function declaration.  NONPARMS is used to handle enum
   constants.  */

void
do_push_parm_decls (tree decl, tree args, tree *nonparms)
{
  /* If we're doing semantic analysis, then we'll call pushdecl
     for each of these.  We must do them in reverse order so that
     they end in the correct forward order.  */
  args = nreverse (args);

  /* NEXT is cached before pushdecl/chainon because both may rewrite
     the parm's chain pointer.  */
  tree next;
  for (tree parm = args; parm; parm = next)
    {
      next = DECL_CHAIN (parm);
      if (TREE_CODE (parm) == PARM_DECL)
	pushdecl (parm);
      else if (nonparms)
	{
	  /* If we find an enum constant or a type tag, put it aside for
	     the moment.  */
	  TREE_CHAIN (parm) = NULL_TREE;
	  *nonparms = chainon (*nonparms, parm);
	}
    }

  /* Get the decls in their original chain order and record in the
     function.  This is all and only the PARM_DECLs that were pushed
     into scope by the loop above.  */
  DECL_ARGUMENTS (decl) = get_local_decls ();
}

/* Store the parameter declarations into the current function declaration.
   This is called after parsing the parameter declarations, before
   digesting the body of the function.

   Also install to binding contour return value identifier, if any.  */

static void
store_parm_decls (tree current_function_parms)
{
  tree fndecl = current_function_decl;

  /* This is a chain of any other decls that came in among the parm
     declarations.  If a parm is declared with  enum {foo, bar} x;
     then CONST_DECLs for foo and bar are put here.  */
  tree nonparms = NULL_TREE;

  if (current_function_parms)
    {
      /* This case is when the function was defined with an ANSI prototype.
	 The parms already have decls, so we need not do anything here
	 except record them as in effect
	 and complain if any redundant old-style parm decls were written.  */

      tree specparms = current_function_parms;

      /* Must clear this because it might contain TYPE_DECLs declared
	 at class level.  */
      current_binding_level->names = NULL;

      do_push_parm_decls (fndecl, specparms, &nonparms);
    }
  else
    DECL_ARGUMENTS (fndecl) = NULL_TREE;

  /* Now store the final chain of decls for the arguments
     as the decl-chain of the current lexical scope.
     Put the enumerators in as well, at the front so that
     DECL_ARGUMENTS is not modified.  */
  current_binding_level->names = chainon (nonparms, DECL_ARGUMENTS (fndecl));

  if (use_eh_spec_block (current_function_decl))
    current_eh_spec_block = begin_eh_spec_block ();
}


/* Set the return value of the constructor (if present).  Only emits
   anything on targets whose ABI makes cdtors return `this'.  */

static void
finish_constructor_body (void)
{
  tree val;
  tree exprstmt;

  if (targetm.cxx.cdtor_returns_this ())
    {
      /* Any return from a constructor will end up here.  */
      add_stmt (build_stmt (input_location, LABEL_EXPR, cdtor_label));

      /* DECL_ARGUMENTS here is the `this' parameter; copy it into the
	 RESULT_DECL so the constructor returns the object address.  */
      val = DECL_ARGUMENTS (current_function_decl);
      val = build2 (MODIFY_EXPR, TREE_TYPE (val),
		    DECL_RESULT (current_function_decl), val);
      /* Return the address of the object.  */
      exprstmt = build_stmt (input_location, RETURN_EXPR, val);
      add_stmt (exprstmt);
    }
}

/* Do all the processing for the beginning of a destructor; set up the
   vtable pointers and cleanups for bases and members.  */

static void
begin_destructor_body (void)
{
  tree compound_stmt;

  /* If the CURRENT_CLASS_TYPE is incomplete, we will have already
     issued an error message.  We still want to try to process the
     body of the function, but initialize_vtbl_ptrs will crash if
     TYPE_BINFO is NULL.  */
  if (COMPLETE_TYPE_P (current_class_type))
    {
      compound_stmt = begin_compound_stmt (0);
      /* Make all virtual function table pointers in non-virtual base
	 classes point to CURRENT_CLASS_TYPE's virtual function
	 tables.
  */
      initialize_vtbl_ptrs (current_class_ptr);
      finish_compound_stmt (compound_stmt);

      if (flag_lifetime_dse
	  /* Clobbering an empty base is harmful if it overlays real data.  */
	  && !is_empty_class (current_class_type))
	{
	  /* Under non-recovering -fsanitize=vptr, clear the vptr at the
	     end of the dtor instead of emitting a full clobber, so a
	     use-after-destroy is caught as a null vptr.  */
	  if (sanitize_flags_p (SANITIZE_VPTR)
	      && (flag_sanitize_recover & SANITIZE_VPTR) == 0
	      && TYPE_CONTAINS_VPTR_P (current_class_type))
	    {
	      tree binfo = TYPE_BINFO (current_class_type);
	      tree ref
		= cp_build_fold_indirect_ref (current_class_ptr);

	      tree vtbl_ptr = build_vfield_ref (ref, TREE_TYPE (binfo));
	      tree vtbl = build_zero_cst (TREE_TYPE (vtbl_ptr));
	      tree stmt = cp_build_modify_expr (input_location, vtbl_ptr,
						NOP_EXPR, vtbl,
						tf_warning_or_error);
	      /* If the vptr is shared with some virtual nearly empty base,
		 don't clear it if not in charge, the dtor of the virtual
		 nearly empty base will do that later.  */
	      if (CLASSTYPE_VBASECLASSES (current_class_type))
		{
		  /* Walk down the primary-base chain looking for a
		     virtual primary base that would own the shared
		     vptr.  */
		  tree c = current_class_type;
		  while (CLASSTYPE_PRIMARY_BINFO (c))
		    {
		      if (BINFO_VIRTUAL_P (CLASSTYPE_PRIMARY_BINFO (c)))
			{
			  stmt = convert_to_void (stmt, ICV_STATEMENT,
						  tf_warning_or_error);
			  stmt = build_if_in_charge (stmt);
			  break;
			}
		      c = BINFO_TYPE (CLASSTYPE_PRIMARY_BINFO (c));
		    }
		}
	      finish_decl_cleanup (NULL_TREE, stmt);
	    }
	  else
	    finish_decl_cleanup (NULL_TREE, build_clobber_this ());
	}

      /* And insert cleanups for our bases and members so that they
	 will be properly destroyed if we throw.  */
      push_base_cleanups ();
    }
}

/* At the end of every destructor we generate code to delete the object if
   necessary.  Do that now.  */

static void
finish_destructor_body (void)
{
  tree exprstmt;

  /* Any return from a destructor will end up here; that way all base
     and member cleanups will be run when the function returns.  */
  add_stmt (build_stmt (input_location, LABEL_EXPR, cdtor_label));

  if (targetm.cxx.cdtor_returns_this ())
    {
      tree val;

      /* As in finish_constructor_body: copy `this' into the
	 RESULT_DECL for ABIs where dtors return the object.  */
      val = DECL_ARGUMENTS (current_function_decl);
      val = build2 (MODIFY_EXPR, TREE_TYPE (val),
		    DECL_RESULT (current_function_decl), val);
      /* Return the address of the object.
  */
      exprstmt = build_stmt (input_location, RETURN_EXPR, val);
      add_stmt (exprstmt);
    }
}

/* Do the necessary processing for the beginning of a function body, which
   in this case includes member-initializers, but not the catch clauses of
   a function-try-block.  Currently, this means opening a binding level
   for the member-initializers (in a ctor), member cleanups (in a dtor),
   and capture proxies (in a lambda operator()).  Returns the compound
   statement node for the body, or NULL_TREE when no body block is
   needed.  */

tree
begin_function_body (void)
{
  if (! FUNCTION_NEEDS_BODY_BLOCK (current_function_decl))
    return NULL_TREE;

  if (processing_template_decl)
    /* Do nothing now.  */;
  else
    /* Always keep the BLOCK node associated with the outermost pair of
       curly braces of a function.  These are needed for correct
       operation of dwarfout.c.  */
    keep_next_level (true);

  tree stmt = begin_compound_stmt (BCS_FN_BODY);

  if (processing_template_decl)
    /* Do nothing now.  */;
  else if (DECL_DESTRUCTOR_P (current_function_decl))
    begin_destructor_body ();

  return stmt;
}

/* Do the processing for the end of a function body.  Currently, this means
   closing out the cleanups for fully-constructed bases and members, and in
   the case of the destructor, deleting the object if desired.  Again, this
   is only meaningful for [cd]tors, since they are the only functions where
   there is a significant distinction between the main body and any
   function catch clauses.  Handling, say, main() return semantics here
   would be wrong, as flowing off the end of a function catch clause for
   main() would also need to return 0.  */

void
finish_function_body (tree compstmt)
{
  /* COMPSTMT is NULL_TREE when begin_function_body emitted no body
     block; there is nothing to close.  */
  if (compstmt == NULL_TREE)
    return;

  /* Close the block.  */
  finish_compound_stmt (compstmt);

  if (processing_template_decl)
    /* Do nothing now.
  */;
  else if (DECL_CONSTRUCTOR_P (current_function_decl))
    finish_constructor_body ();
  else if (DECL_DESTRUCTOR_P (current_function_decl))
    finish_destructor_body ();
}

/* Given a function, returns the BLOCK corresponding to the outermost level
   of curly braces, skipping the artificial block created for constructor
   initializers.  The outer block is expected at most two levels down;
   anything deeper trips the assert.  */

tree
outer_curly_brace_block (tree fndecl)
{
  tree block = DECL_INITIAL (fndecl);
  if (BLOCK_OUTER_CURLY_BRACE_P (block))
    return block;
  block = BLOCK_SUBBLOCKS (block);
  if (BLOCK_OUTER_CURLY_BRACE_P (block))
    return block;
  block = BLOCK_SUBBLOCKS (block);
  gcc_assert (BLOCK_OUTER_CURLY_BRACE_P (block));
  return block;
}

/* If FNDECL is a class's key method, add the class to the list of
   keyed classes that should be emitted.  */

static void
record_key_method_defined (tree fndecl)
{
  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (fndecl)
      && DECL_VIRTUAL_P (fndecl)
      && !processing_template_decl)
    {
      tree fnclass = DECL_CONTEXT (fndecl);
      if (fndecl == CLASSTYPE_KEY_METHOD (fnclass))
	vec_safe_push (keyed_classes, fnclass);
    }
}

/* Subroutine of finish_function.
   Save the body of constexpr functions for possible
   future compile time evaluation.  */

static void
maybe_save_function_definition (tree fun)
{
  if (!processing_template_decl
      && DECL_DECLARED_CONSTEXPR_P (fun)
      && !cp_function_chain->invalid_constexpr
      && !DECL_CLONED_FUNCTION_P (fun))
    register_constexpr_fundef (fun, DECL_SAVED_TREE (fun));
}

/* Attempt to add a fix-it hint to RICHLOC suggesting the insertion
   of "return *this;" immediately before its location, using FNDECL's
   first statement (if any) to give the indentation, if appropriate.
  */

static void
add_return_star_this_fixit (gcc_rich_location *richloc, tree fndecl)
{
  /* Use the location of the function's first statement, if any, so the
     inserted text picks up the surrounding indentation.  */
  location_t indent = UNKNOWN_LOCATION;
  tree stmts = expr_first (DECL_SAVED_TREE (fndecl));
  if (stmts)
    indent = EXPR_LOCATION (stmts);
  richloc->add_fixit_insert_formatted ("return *this;",
				       richloc->get_loc (),
				       indent);
}

/* This function carries out the subset of finish_function operations needed
   to emit the compiler-generated outlined helper functions used by the
   coroutines implementation.  */

static void
emit_coro_helper (tree helper)
{
  /* This is a partial set of the operations done by finish_function()
     plus emitting the result.  */
  set_cfun (NULL);
  current_function_decl = helper;
  begin_scope (sk_function_parms, NULL);
  store_parm_decls (DECL_ARGUMENTS (helper));
  announce_function (helper);
  allocate_struct_function (helper, false);
  cfun->language = ggc_cleared_alloc<language_function> ();
  poplevel (1, 0, 1);
  maybe_save_function_definition (helper);
  /* We must start each function with a clear fold cache.  */
  clear_fold_cache ();
  cp_fold_function (helper);
  DECL_CONTEXT (DECL_RESULT (helper)) = helper;
  BLOCK_SUPERCONTEXT (DECL_INITIAL (helper)) = helper;
  /* This function has coroutine IFNs that we should handle in middle
     end lowering.  */
  cfun->coroutine_component = true;
  cp_genericize (helper);
  expand_or_defer_fn (helper);
}

/* Finish up a function declaration and compile that function
   all the way to assembler language output.  Then free the storage
   for the function definition.  INLINE_P is TRUE if we just
   finished processing the body of an in-class inline function
   definition.  (This processing will have taken place after the
   class definition is complete.)
  */

tree
finish_function (bool inline_p)
{
  tree fndecl = current_function_decl;
  tree fntype, ctype = NULL_TREE;
  /* Outlined coroutine helpers, filled in by morph_fn_to_coro.  */
  tree resumer = NULL_TREE, destroyer = NULL_TREE;
  bool coro_p = flag_coroutines
		&& !processing_template_decl
		&& DECL_COROUTINE_P (fndecl);
  bool coro_emit_helpers = false;

  /* When we get some parse errors, we can end up without a
     current_function_decl, so cope.  */
  if (fndecl == NULL_TREE)
    return error_mark_node;

  if (!DECL_OMP_DECLARE_REDUCTION_P (fndecl))
    finish_lambda_scope ();

  if (c_dialect_objc ())
    objc_finish_function ();

  record_key_method_defined (fndecl);

  fntype = TREE_TYPE (fndecl);

  /*  TREE_READONLY (fndecl) = 1;
      This caused &foo to be of type ptr-to-const-function
      which then got a warning when stored in a ptr-to-function variable.  */

  gcc_assert (building_stmt_list_p ());
  /* The current function is being defined, so its DECL_INITIAL should
     be set, and unless there's a multiple definition, it should be
     error_mark_node.  */
  gcc_assert (DECL_INITIAL (fndecl) == error_mark_node);

  if (coro_p)
    {
      /* Only try to emit the coroutine outlined helper functions if the
	 transforms succeeded.  Otherwise, treat errors in the same way as
	 a regular function.  */
      coro_emit_helpers = morph_fn_to_coro (fndecl, &resumer, &destroyer);

      /* We should handle coroutine IFNs in middle end lowering.  */
      cfun->coroutine_component = true;

      /* Do not try to process the ramp's EH unless outlining succeeded.  */
      if (coro_emit_helpers && use_eh_spec_block (fndecl))
	finish_eh_spec_block (TYPE_RAISES_EXCEPTIONS (TREE_TYPE (fndecl)),
			      current_eh_spec_block);
    }
  else
  /* For a cloned function, we've already got all the code we need;
     there's no need to add any extra bits.  */
  if (!DECL_CLONED_FUNCTION_P (fndecl))
    {
      /* Make it so that `main' always returns 0 by default.
  */
      if (DECL_MAIN_P (current_function_decl))
	finish_return_stmt (integer_zero_node);

      if (use_eh_spec_block (current_function_decl))
	finish_eh_spec_block (TYPE_RAISES_EXCEPTIONS
			      (TREE_TYPE (current_function_decl)),
			      current_eh_spec_block);
    }

  /* If we're saving up tree structure, tie off the function now.  */
  DECL_SAVED_TREE (fndecl) = pop_stmt_list (DECL_SAVED_TREE (fndecl));

  finish_fname_decls ();

  /* If this function can't throw any exceptions, remember that.  */
  if (!processing_template_decl
      && !cp_function_chain->can_throw
      && !flag_non_call_exceptions
      && !decl_replaceable_p (fndecl))
    TREE_NOTHROW (fndecl) = 1;

  /* This must come after expand_function_end because cleanups might
     have declarations (from inline functions) that need to go into
     this function's blocks.  */

  /* If the current binding level isn't the outermost binding level
     for this function, either there is a bug, or we have experienced
     syntax errors and the statement tree is malformed.  */
  if (current_binding_level->kind != sk_function_parms)
    {
      /* Make sure we have already experienced errors.  */
      gcc_assert (errorcount);

      /* Throw away the broken statement tree and extra binding
	 levels.  */
      DECL_SAVED_TREE (fndecl) = alloc_stmt_list ();

      while (current_binding_level->kind != sk_function_parms)
	{
	  if (current_binding_level->kind == sk_class)
	    pop_nested_class ();
	  else
	    poplevel (0, 0, 0);
	}
    }
  poplevel (1, 0, 1);

  /* Statements should always be full-expressions at the outermost set
     of curly braces for a function.  */
  gcc_assert (stmts_are_full_exprs_p ());

  /* If there are no return statements in a function with auto return type,
     the return type is void.  But if the declared type is something like
     auto*, this is an error.
  */
  if (!processing_template_decl && FNDECL_USED_AUTO (fndecl)
      && TREE_TYPE (fntype) == DECL_SAVED_AUTO_RETURN_TYPE (fndecl))
    {
      if (is_auto (DECL_SAVED_AUTO_RETURN_TYPE (fndecl))
	  && !current_function_returns_value
	  && !current_function_returns_null)
	{
	  /* We haven't applied return type deduction because we haven't
	     seen any return statements.  Do that now.  */
	  tree node = type_uses_auto (DECL_SAVED_AUTO_RETURN_TYPE (fndecl));
	  do_auto_deduction (DECL_SAVED_AUTO_RETURN_TYPE (fndecl),
			     void_node, node, tf_warning_or_error,
			     adc_return_type);

	  apply_deduced_return_type (fndecl, void_type_node);
	  fntype = TREE_TYPE (fndecl);
	}
      else if (!current_function_returns_value
	       && !current_function_returns_null)
	{
	  error ("no return statements in function returning %qT",
		 DECL_SAVED_AUTO_RETURN_TYPE (fndecl));
	  inform (input_location, "only plain %<auto%> return type can be "
		  "deduced to %<void%>");
	}
    }

  /* Remember that we were in class scope.  */
  if (current_class_name)
    ctype = current_class_type;

  if (DECL_DELETED_FN (fndecl))
    {
      DECL_INITIAL (fndecl) = error_mark_node;
      DECL_SAVED_TREE (fndecl) = NULL_TREE;
      goto cleanup;
    }

  // If this is a concept, check that the definition is reasonable.
  if (DECL_DECLARED_CONCEPT_P (fndecl))
    check_function_concept (fndecl);

  if (flag_openmp)
    if (tree attr = lookup_attribute ("omp declare variant base",
				      DECL_ATTRIBUTES (fndecl)))
      omp_declare_variant_finalize (fndecl, attr);

  /* Complain if there's just no return statement.  */
  if ((warn_return_type
       || (cxx_dialect >= cxx14 && DECL_DECLARED_CONSTEXPR_P (fndecl)))
      && !VOID_TYPE_P (TREE_TYPE (fntype))
      && !dependent_type_p (TREE_TYPE (fntype))
      && !current_function_returns_value && !current_function_returns_null
      /* Don't complain if we abort or throw.  */
      && !current_function_returns_abnormally
      /* Don't complain if there's an infinite loop.  */
      && !current_function_infinite_loop
      /* Don't complain if we are declared noreturn.
  */
      && !TREE_THIS_VOLATILE (fndecl)
      && !DECL_NAME (DECL_RESULT (fndecl))
      && !TREE_NO_WARNING (fndecl)
      /* Structor return values (if any) are set by the compiler.  */
      && !DECL_CONSTRUCTOR_P (fndecl)
      && !DECL_DESTRUCTOR_P (fndecl)
      && targetm.warn_func_return (fndecl))
    {
      gcc_rich_location richloc (input_location);
      /* Potentially add a "return *this;" fix-it hint for
	 assignment operators.  */
      if (IDENTIFIER_ASSIGN_OP_P (DECL_NAME (fndecl)))
	{
	  tree valtype = TREE_TYPE (DECL_RESULT (fndecl));
	  if (TREE_CODE (valtype) == REFERENCE_TYPE
	      && current_class_ref
	      && same_type_ignoring_top_level_qualifiers_p
		  (TREE_TYPE (valtype), TREE_TYPE (current_class_ref))
	      && global_dc->option_enabled (OPT_Wreturn_type,
					    global_dc->lang_mask,
					    global_dc->option_state))
	    add_return_star_this_fixit (&richloc, fndecl);
	}
      if (cxx_dialect >= cxx14
	  && DECL_DECLARED_CONSTEXPR_P (fndecl))
	error_at (&richloc, "no return statement in %<constexpr%> function "
			    "returning non-void");
      else if (warning_at (&richloc, OPT_Wreturn_type,
			   "no return statement in function returning "
			   "non-void"))
	TREE_NO_WARNING (fndecl) = 1;
    }

  /* Lambda closure members are implicitly constexpr if possible.  */
  if (cxx_dialect >= cxx17
      && LAMBDA_TYPE_P (CP_DECL_CONTEXT (fndecl)))
    DECL_DECLARED_CONSTEXPR_P (fndecl)
      = ((processing_template_decl
	  || is_valid_constexpr_fn (fndecl, /*complain*/false))
	 && potential_constant_expression (DECL_SAVED_TREE (fndecl)));

  /* Save constexpr function body before it gets munged by
     the NRV transformation.   */
  maybe_save_function_definition (fndecl);

  /* Invoke the pre-genericize plugin before we start munging things.  */
  if (!processing_template_decl)
    invoke_plugin_callbacks (PLUGIN_PRE_GENERICIZE, fndecl);

  /* Perform delayed folding before NRV transformation.  */
  if (!processing_template_decl
      && !DECL_IMMEDIATE_FUNCTION_P (fndecl)
      && !DECL_OMP_DECLARE_REDUCTION_P (fndecl))
    cp_fold_function (fndecl);

  /* Set up the named return value optimization, if we can.  Candidate
     variables are selected in check_return_expr.
  */
  if (current_function_return_value)
    {
      tree r = current_function_return_value;
      tree outer;

      if (r != error_mark_node
	  /* This is only worth doing for fns that return in memory--and
	     simpler, since we don't have to worry about promoted modes.  */
	  && aggregate_value_p (TREE_TYPE (TREE_TYPE (fndecl)), fndecl)
	  /* Only allow this for variables declared in the outer scope of
	     the function so we know that their lifetime always ends with a
	     return; see g++.dg/opt/nrv6.C.  We could be more flexible if
	     we were to do this optimization in tree-ssa.  */
	  && (outer = outer_curly_brace_block (fndecl))
	  && chain_member (r, BLOCK_VARS (outer)))
	finalize_nrv (&DECL_SAVED_TREE (fndecl), r, DECL_RESULT (fndecl));

      current_function_return_value = NULL_TREE;
    }

  /* Must mark the RESULT_DECL as being in this function.  */
  DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;

  /* Set the BLOCK_SUPERCONTEXT of the outermost function scope to point
     to the FUNCTION_DECL node itself.  */
  BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;

  /* Store the end of the function, so that we get good line number
     info for the epilogue.  */
  cfun->function_end_locus = input_location;

  /* Complain about parameters that are only set, but never otherwise used.
  */
  if (warn_unused_but_set_parameter
      && !processing_template_decl
      && errorcount == unused_but_set_errorcount
      && !DECL_CLONED_FUNCTION_P (fndecl))
    {
      tree decl;

      for (decl = DECL_ARGUMENTS (fndecl);
	   decl;
	   decl = DECL_CHAIN (decl))
	if (TREE_USED (decl)
	    && TREE_CODE (decl) == PARM_DECL
	    && !DECL_READ_P (decl)
	    && DECL_NAME (decl)
	    && !DECL_ARTIFICIAL (decl)
	    && !TREE_NO_WARNING (decl)
	    && !DECL_IN_SYSTEM_HEADER (decl)
	    && TREE_TYPE (decl) != error_mark_node
	    && !TYPE_REF_P (TREE_TYPE (decl))
	    && (!CLASS_TYPE_P (TREE_TYPE (decl))
		|| !TYPE_HAS_NONTRIVIAL_DESTRUCTOR (TREE_TYPE (decl))))
	  warning_at (DECL_SOURCE_LOCATION (decl),
		      OPT_Wunused_but_set_parameter,
		      "parameter %qD set but not used", decl);
      unused_but_set_errorcount = errorcount;
    }

  /* Complain about locally defined typedefs that are not used in this
     function.  */
  maybe_warn_unused_local_typedefs ();

  /* Possibly warn about unused parameters.  */
  if (warn_unused_parameter
      && !processing_template_decl
      && !DECL_CLONED_FUNCTION_P (fndecl))
    do_warn_unused_parameter (fndecl);

  /* Genericize before inlining.  */
  if (!processing_template_decl
      && !DECL_IMMEDIATE_FUNCTION_P (fndecl)
      && !DECL_OMP_DECLARE_REDUCTION_P (fndecl))
    cp_genericize (fndecl);

  /* Emit the resumer and destroyer functions now, providing that we have
     not encountered some fatal error.  */
  if (coro_emit_helpers)
    {
      emit_coro_helper (resumer);
      emit_coro_helper (destroyer);
    }

cleanup:
  /* We're leaving the context of this function, so zap cfun.  It's still in
     DECL_STRUCT_FUNCTION, and we'll restore it in tree_rest_of_compilation.  */
  set_cfun (NULL);
  current_function_decl = NULL;

  /* If this is an in-class inline definition, we may have to pop the
     bindings for the template parameters that we added in
     maybe_begin_member_template_processing when start_function was
     called.  */
  if (inline_p)
    maybe_end_member_template_processing ();

  /* Leave the scope of the class.  */
  if (ctype)
    pop_nested_class ();

  --function_depth;

  /* Clean up.
  */
  current_function_decl = NULL_TREE;

  invoke_plugin_callbacks (PLUGIN_FINISH_PARSE_FUNCTION, fndecl);

  return fndecl;
}

/* Create the FUNCTION_DECL for a function definition.
   DECLSPECS and DECLARATOR are the parts of the declaration;
   they describe the return type and the name of the function,
   but twisted together in a fashion that parallels the syntax of C.

   This function creates a binding context for the function body
   as well as setting up the FUNCTION_DECL in current_function_decl.

   Returns a FUNCTION_DECL on success.

   If the DECLARATOR is not suitable for a function (it defines a datum
   instead), we return 0, which tells yyparse to report a parse error.

   May return void_type_node indicating that this method is actually
   a friend.  See grokfield for more details.

   Came here with a `.pushlevel' .

   DO NOT MAKE ANY CHANGES TO THIS CODE WITHOUT MAKING CORRESPONDING
   CHANGES TO CODE IN `grokfield'.  */

tree
grokmethod (cp_decl_specifier_seq *declspecs,
	    const cp_declarator *declarator, tree attrlist)
{
  tree fndecl = grokdeclarator (declarator, declspecs, MEMFUNCDEF, 0,
				&attrlist);

  if (fndecl == error_mark_node)
    return error_mark_node;

  if (attrlist)
    cplus_decl_attributes (&fndecl, attrlist, 0);

  /* Pass friends other than inline friend functions back.  */
  if (fndecl == void_type_node)
    return fndecl;

  if (DECL_IN_AGGR_P (fndecl))
    {
      if (DECL_CLASS_SCOPE_P (fndecl))
	error ("%qD is already defined in class %qT", fndecl,
	       DECL_CONTEXT (fndecl));
      return error_mark_node;
    }

  check_template_shadow (fndecl);

  /* In-class definitions are implicitly inline; suppress the usual
     "inline function used but never defined" style warnings.  */
  if (TREE_PUBLIC (fndecl))
    DECL_COMDAT (fndecl) = 1;
  DECL_DECLARED_INLINE_P (fndecl) = 1;
  DECL_NO_INLINE_WARNING_P (fndecl) = 1;

  /* We process method specializations in finish_struct_1.  */
  if (processing_template_decl && !DECL_TEMPLATE_SPECIALIZATION (fndecl))
    {
      /* Avoid calling decl_spec_seq... until we have to.
  */
      bool friendp = decl_spec_seq_has_spec_p (declspecs, ds_friend);
      fndecl = push_template_decl (fndecl, friendp);
      if (fndecl == error_mark_node)
	return fndecl;
    }

  if (DECL_CHAIN (fndecl) && !decl_spec_seq_has_spec_p (declspecs, ds_friend))
    {
      /* Detach the decl from any chain it is already on so it can be
	 rechained into the class.  */
      fndecl = copy_node (fndecl);
      TREE_CHAIN (fndecl) = NULL_TREE;
    }

  cp_finish_decl (fndecl, NULL_TREE, false, NULL_TREE, 0);

  DECL_IN_AGGR_P (fndecl) = 1;
  return fndecl;
}


/* VAR is a VAR_DECL.  If its type is incomplete, remember VAR so that
   we can lay it out later, when and if its type becomes complete.

   Also handle constexpr variables where the initializer involves
   an unlowered PTRMEM_CST because the class isn't complete yet.  */

void
maybe_register_incomplete_var (tree var)
{
  gcc_assert (VAR_P (var));

  /* Keep track of variables with incomplete types.  */
  if (!processing_template_decl && TREE_TYPE (var) != error_mark_node
      && DECL_EXTERNAL (var))
    {
      tree inner_type = TREE_TYPE (var);

      /* Strip array dimensions to get at the element type; an array of
	 incomplete class is itself incomplete.  */
      while (TREE_CODE (inner_type) == ARRAY_TYPE)
	inner_type = TREE_TYPE (inner_type);
      inner_type = TYPE_MAIN_VARIANT (inner_type);

      if ((!COMPLETE_TYPE_P (inner_type) && CLASS_TYPE_P (inner_type))
	  /* RTTI TD entries are created while defining the type_info.  */
	  || (TYPE_LANG_SPECIFIC (inner_type)
	      && TYPE_BEING_DEFINED (inner_type)))
	{
	  incomplete_var iv = {var, inner_type};
	  vec_safe_push (incomplete_vars, iv);
	}
      else if (!(DECL_LANG_SPECIFIC (var) && DECL_TEMPLATE_INFO (var))
	       && decl_constant_var_p (var)
	       && (TYPE_PTRMEM_P (inner_type) || CLASS_TYPE_P (inner_type)))
	{
	  /* When the outermost open class is complete we can resolve any
	     pointers-to-members.  */
	  tree context = outermost_open_class ();
	  incomplete_var iv = {var, context};
	  vec_safe_push (incomplete_vars, iv);
	}
    }
}

/* Called when a class type (given by TYPE) is defined.  If there
   are any existing VAR_DECLs whose type has been completed by this
   declaration, update them now.
  */

void
complete_vars (tree type)
{
  unsigned ix;
  incomplete_var *iv;

  /* IX is only advanced when no entry is removed; unordered_remove
     swaps the last element into slot IX, which must be revisited.  */
  for (ix = 0; vec_safe_iterate (incomplete_vars, ix, &iv); )
    {
      if (same_type_p (type, iv->incomplete_type))
	{
	  tree var = iv->decl;
	  tree type = TREE_TYPE (var);

	  if (type != error_mark_node
	      && (TYPE_MAIN_VARIANT (strip_array_types (type))
		  == iv->incomplete_type))
	    {
	      /* Complete the type of the variable.  */
	      complete_type (type);
	      cp_apply_type_quals_to_decl (cp_type_quals (type), var);
	      if (COMPLETE_TYPE_P (type))
		layout_var_decl (var);
	    }

	  /* Remove this entry from the list.  */
	  incomplete_vars->unordered_remove (ix);
	}
      else
	ix++;
    }

  /* Check for pending declarations which may have abstract type.  */
  complete_type_check_abstract (type);
}

/* If DECL is of a type which needs a cleanup, build and return an
   expression to perform that cleanup here.  Return NULL_TREE if no
   cleanup need be done.  DECL can also be a _REF when called from
   split_nonconstant_init_1.  */

tree
cxx_maybe_build_cleanup (tree decl, tsubst_flags_t complain)
{
  tree type;
  tree attr;
  tree cleanup;

  /* Assume no cleanup is required.  */
  cleanup = NULL_TREE;

  if (error_operand_p (decl))
    return cleanup;

  /* Handle "__attribute__((cleanup))".  We run the cleanup function
     before the destructor since the destructor is what actually
     terminates the lifetime of the object.  */
  if (DECL_P (decl))
    attr = lookup_attribute ("cleanup", DECL_ATTRIBUTES (decl));
  else
    attr = NULL_TREE;
  if (attr)
    {
      tree id;
      tree fn;
      tree arg;

      /* Get the name specified by the user for the cleanup function.  */
      id = TREE_VALUE (TREE_VALUE (attr));

      /* Look up the name to find the cleanup function to call.  It is
	 important to use lookup_name here because that is what is
	 used in c-common.c:handle_cleanup_attribute when performing
	 initial checks on the attribute.  Note that those checks
	 include ensuring that the function found is not an overloaded
	 function, or an object with an overloaded call operator,
	 etc.; we can rely on the fact that the function found is an
	 ordinary FUNCTION_DECL.
  */
      fn = lookup_name (id);
      arg = build_address (decl);
      if (!mark_used (decl, complain) && !(complain & tf_error))
	return error_mark_node;
      cleanup = cp_build_function_call_nary (fn, complain, arg, NULL_TREE);
      if (cleanup == error_mark_node)
	return error_mark_node;
    }
  /* Handle ordinary C++ destructors.  */
  type = TREE_TYPE (decl);
  if (type_build_dtor_call (type))
    {
      int flags = LOOKUP_NORMAL|LOOKUP_NONVIRTUAL|LOOKUP_DESTRUCTOR;
      tree addr;
      tree call;

      /* Arrays are passed as-is; build_delete destroys each element.  */
      if (TREE_CODE (type) == ARRAY_TYPE)
	addr = decl;
      else
	addr = build_address (decl);

      call = build_delete (input_location, TREE_TYPE (addr), addr,
			   sfk_complete_destructor, flags, 0, complain);
      if (call == error_mark_node)
	cleanup = error_mark_node;
      else if (TYPE_HAS_TRIVIAL_DESTRUCTOR (type))
	/* Discard the call.  */;
      else if (decl_maybe_constant_destruction (decl, type)
	       && DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl))
	cxx_constant_dtor (call, decl);
      else if (cleanup)
	/* Run the attribute-cleanup function first, then the dtor.  */
	cleanup = cp_build_compound_expr (cleanup, call, complain);
      else
	cleanup = call;
    }

  /* build_delete sets the location of the destructor call to the
     current location, even though the destructor is going to be
     called later, at the end of the current scope.  This can lead to
     a "jumpy" behavior for users of debuggers when they step around
     the end of the block.  So let's unset the location of the
     destructor call instead.  */
  protected_set_expr_location (cleanup, UNKNOWN_LOCATION);
  if (cleanup && CONVERT_EXPR_P (cleanup))
    protected_set_expr_location (TREE_OPERAND (cleanup, 0), UNKNOWN_LOCATION);

  if (cleanup
      && DECL_P (decl)
      && !lookup_attribute ("warn_unused",
			    TYPE_ATTRIBUTES (TREE_TYPE (decl)))
      /* Treat objects with destructors as used; the destructor may do
	 something substantive.
*/ && !mark_used (decl, complain) && !(complain & tf_error)) return error_mark_node; if (cleanup && cfun && !processing_template_decl && !expr_noexcept_p (cleanup, tf_none)) cp_function_chain->throwing_cleanup = true; return cleanup; } /* Return the FUNCTION_TYPE that corresponds to MEMFNTYPE, which can be a FUNCTION_DECL, METHOD_TYPE, FUNCTION_TYPE, pointer or reference to METHOD_TYPE or FUNCTION_TYPE, or pointer to member function. */ tree static_fn_type (tree memfntype) { tree fntype; tree args; if (TYPE_PTRMEMFUNC_P (memfntype)) memfntype = TYPE_PTRMEMFUNC_FN_TYPE (memfntype); if (INDIRECT_TYPE_P (memfntype) || TREE_CODE (memfntype) == FUNCTION_DECL) memfntype = TREE_TYPE (memfntype); if (TREE_CODE (memfntype) == FUNCTION_TYPE) return memfntype; gcc_assert (TREE_CODE (memfntype) == METHOD_TYPE); args = TYPE_ARG_TYPES (memfntype); fntype = build_function_type (TREE_TYPE (memfntype), TREE_CHAIN (args)); fntype = apply_memfn_quals (fntype, type_memfn_quals (memfntype)); fntype = (cp_build_type_attribute_variant (fntype, TYPE_ATTRIBUTES (memfntype))); fntype = cxx_copy_lang_qualifiers (fntype, memfntype); return fntype; } /* DECL was originally constructed as a non-static member function, but turned out to be static. Update it accordingly. */ void revert_static_member_fn (tree decl) { tree stype = static_fn_type (decl); cp_cv_quals quals = type_memfn_quals (stype); cp_ref_qualifier rqual = type_memfn_rqual (stype); if (quals != TYPE_UNQUALIFIED || rqual != REF_QUAL_NONE) stype = apply_memfn_quals (stype, TYPE_UNQUALIFIED, REF_QUAL_NONE); TREE_TYPE (decl) = stype; if (DECL_ARGUMENTS (decl)) DECL_ARGUMENTS (decl) = DECL_CHAIN (DECL_ARGUMENTS (decl)); DECL_STATIC_FUNCTION_P (decl) = 1; } /* Return which tree structure is used by T, or TS_CP_GENERIC if T is one of the language-independent trees. 
*/

enum cp_tree_node_structure_enum
cp_tree_node_structure (union lang_tree_node * t)
{
  /* Map each C++-specific tree code to its lang_tree_node variant;
     every other code uses the language-independent layout.  */
  switch (TREE_CODE (&t->generic))
    {
    case ARGUMENT_PACK_SELECT:	return TS_CP_ARGUMENT_PACK_SELECT;
    case BASELINK:		return TS_CP_BASELINK;
    case CONSTRAINT_INFO:	return TS_CP_CONSTRAINT_INFO;
    case DEFERRED_NOEXCEPT:	return TS_CP_DEFERRED_NOEXCEPT;
    case DEFERRED_PARSE:	return TS_CP_DEFERRED_PARSE;
    case IDENTIFIER_NODE:	return TS_CP_IDENTIFIER;
    case LAMBDA_EXPR:		return TS_CP_LAMBDA_EXPR;
    case OVERLOAD:		return TS_CP_OVERLOAD;
    case PTRMEM_CST:		return TS_CP_PTRMEM;
    case STATIC_ASSERT:		return TS_CP_STATIC_ASSERT;
    case TEMPLATE_DECL:		return TS_CP_TEMPLATE_DECL;
    case TEMPLATE_INFO:		return TS_CP_TEMPLATE_INFO;
    case TEMPLATE_PARM_INDEX:	return TS_CP_TPI;
    case TRAIT_EXPR:		return TS_CP_TRAIT_EXPR;
    case USERDEF_LITERAL:	return TS_CP_USERDEF_LITERAL;
    default:			return TS_CP_GENERIC;
    }
}

/* Build the void_list_node (void_type_node having been created).  */

tree
build_void_list_node (void)
{
  tree t = build_tree_list (NULL_TREE, void_type_node);
  return t;
}

/* Return true if it is OK for DECL to lack a noreturn attribute even
   though it never returns.  */

bool
cp_missing_noreturn_ok_p (tree decl)
{
  /* A missing noreturn is ok for the `main' function.  */
  return DECL_MAIN_P (decl);
}

/* Return the decl used to identify the COMDAT group into which
   DECL should be placed.  */

tree
cxx_comdat_group (tree decl)
{
  /* Virtual tables, construction virtual tables, and virtual table
     tables all go in a single COMDAT group, named after the primary
     virtual table.  */
  if (VAR_P (decl) && DECL_VTABLE_OR_VTT_P (decl))
    decl = CLASSTYPE_VTABLES (DECL_CONTEXT (decl));
  /* For all other DECLs, the COMDAT group is the mangled name of the
     declaration itself.  */
  else
    {
      /* Follow a chain of thunks down to the eventual target when the
	 thunks are emitted alongside it.  */
      while (DECL_THUNK_P (decl))
	{
	  /* If TARGET_USE_LOCAL_THUNK_ALIAS_P, use_thunk puts the thunk
	     into the same section as the target function.  In that case
	     we must return target's name.  */
	  tree target = THUNK_TARGET (decl);
	  if (TARGET_USE_LOCAL_THUNK_ALIAS_P (target)
	      && DECL_SECTION_NAME (target) != NULL
	      && DECL_ONE_ONLY (target))
	    decl = target;
	  else
	    break;
	}
    }

  return decl;
}

/* Returns the return type for FN as written by the user, which may include
   a placeholder for a deduced return type.  */

tree
fndecl_declared_return_type (tree fn)
{
  fn = STRIP_TEMPLATE (fn);
  if (FNDECL_USED_AUTO (fn))
    return DECL_SAVED_AUTO_RETURN_TYPE (fn);

  return TREE_TYPE (TREE_TYPE (fn));
}

/* Returns true iff DECL is a variable or function declared with an auto type
   that has not yet been deduced to a real type.  */

bool
undeduced_auto_decl (tree decl)
{
  /* `auto' deduction only exists from C++11 on.  */
  if (cxx_dialect < cxx11)
    return false;
  STRIP_ANY_LOCATION_WRAPPER (decl);
  return ((VAR_OR_FUNCTION_DECL_P (decl)
	   || TREE_CODE (decl) == TEMPLATE_DECL)
	  && type_uses_auto (TREE_TYPE (decl)));
}

/* Complain if DECL has an undeduced return type.  */

bool
require_deduced_type (tree decl, tsubst_flags_t complain)
{
  if (undeduced_auto_decl (decl))
    {
      if (TREE_NO_WARNING (decl) && seen_error ())
	/* We probably already complained about deduction failure.  */;
      else if (complain & tf_error)
	error ("use of %qD before deduction of %<auto%>", decl);
      return false;
    }
  return true;
}

/* Create a representation of the explicit-specifier with
   constant-expression of EXPR.  COMPLAIN is as for tsubst.  */

tree
build_explicit_specifier (tree expr, tsubst_flags_t complain)
{
  if (instantiation_dependent_expression_p (expr))
    /* Wait for instantiation, tsubst_function_decl will handle it.  */
    return expr;

  /* Convert to a constant bool and fold it to a constant value.  */
  expr = build_converted_constant_bool_expr (expr, complain);
  expr = instantiate_non_dependent_expr_sfinae (expr, complain);
  expr = cxx_constant_value (expr);
  return expr;
}

#include "gt-cp-decl.h"
core_slange.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlange.c, normal z -> s, Fri Sep 28 17:38:21 2018 * **/ #include <plasma_core_blas.h> #include "plasma_types.h" #include "core_lapack.h" #include <math.h> /***************************************************************************//** * * @ingroup core_lange * * Calculates max, one, infinity or Frobenius norm of a given matrix. * ******************************************************************************* * * @param[in] norm * - PlasmaMaxNorm: Max norm * - PlasmaOneNorm: One norm * - PlasmaInfNorm: Infinity norm * - PlasmaFrobeniusNorm: Frobenius norm * * @param[in] m * The number of rows of the matrix A. m >= 0. When m = 0, * the returned value is set to zero. * * @param[in] n * The number of columns of the matrix A. n >= 0. When n = 0, * the returned value is set to zero. * * @param[in] A * The m-by-n matrix A. * * @param[in] lda * The leading dimension of the array A. lda >= max(1,m). * * @param[in] work * The auxiliary work array. 
* * @param[out] value * The specified norm of the given matrix A * ******************************************************************************/ __attribute__((weak)) void plasma_core_slange(plasma_enum_t norm, int m, int n, const float *A, int lda, float *work, float *value) { *value = LAPACKE_slange_work(LAPACK_COL_MAJOR, lapack_const(norm), m, n, A, lda, work); } /******************************************************************************/ void plasma_core_omp_slange(plasma_enum_t norm, int m, int n, const float *A, int lda, float *work, float *value, plasma_sequence_t *sequence, plasma_request_t *request) { #pragma omp task depend(in:A[0:lda*n]) \ depend(out:value[0:1]) { if (sequence->status == PlasmaSuccess) plasma_core_slange(norm, m, n, A, lda, work, value); } } /******************************************************************************/ void plasma_core_omp_slange_aux(plasma_enum_t norm, int m, int n, const float *A, int lda, float *value, plasma_sequence_t *sequence, plasma_request_t *request) { switch (norm) { case PlasmaOneNorm: #pragma omp task depend(in:A[0:lda*n]) \ depend(out:value[0:n]) { if (sequence->status == PlasmaSuccess) { for (int j = 0; j < n; j++) { value[j] = fabsf(A[lda*j]); for (int i = 1; i < m; i++) { value[j] += fabsf(A[lda*j+i]); } } } } break; case PlasmaInfNorm: #pragma omp task depend(in:A[0:lda*n]) \ depend(out:value[0:m]) { if (sequence->status == PlasmaSuccess) { for (int i = 0; i < m; i++) value[i] = 0.0; for (int j = 0; j < n; j++) { for (int i = 0; i < m; i++) { value[i] += fabsf(A[lda*j+i]); } } } } break; } }
Tutorial.h
//================================================================================================= /*! // \file blaze/Tutorial.h // \brief Tutorial of the Blaze library // // Copyright (C) 2012-2020 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/ //================================================================================================= #ifndef _BLAZE_TUTORIAL_H_ #define _BLAZE_TUTORIAL_H_ //================================================================================================= // // BLAZE TUTORIAL // //================================================================================================= //**Mainpage*************************************************************************************** /*!\mainpage // // \image html blaze300x150.jpg // // This is the API for the \b Blaze high performance C++ math library. It gives a complete // overview of the individual features and sublibraries of \b Blaze. To get a first impression // on \b Blaze, the short \ref getting_started tutorial is a good place to start. Afterwards, // the following long tutorial covers the most important aspects of the \b Blaze math library. // The tabs at the top of the page allow a direct access to the individual modules, namespaces, // classes, and files of the \b Blaze library.\n\n // // \section table_of_content Table of Contents // // <ul> // <li> \ref configuration_and_installation </li> // <li> \ref getting_started </li> // <li> \ref vectors // <ul> // <li> \ref vector_types // <ul> // <li> \ref vector_types_dense_vectors </li> // <li> \ref vector_types_sparse_vectors </li> // </ul> // </li> // <li> \ref vector_operations // <ul> // <li> \ref vector_operations_constructors </li> // <li> \ref vector_operations_assignment </li> // <li> \ref vector_operations_element_access </li> // <li> \ref vector_operations_element_insertion </li> // <li> \ref vector_operations_element_removal </li> // <li> \ref vector_operations_element_lookup </li> // <li> \ref vector_operations_non_modifying_operations </li> // <li> \ref vector_operations_modifying_operations </li> // <li> \ref vector_operations_arithmetic_operations </li> // <li> \ref vector_operations_reduction_operations </li> // <li> \ref vector_operations_norms 
</li> // <li> \ref vector_operations_scalar_expansion </li> // <li> \ref vector_operations_vector_expansion </li> // <li> \ref vector_operations_vector_repetition </li> // <li> \ref vector_operations_statistic_operations </li> // <li> \ref vector_operations_declaration_operations </li> // <li> \ref vector_operations_vector_generators </li> // </ul> // </li> // </ul> // </li> // <li> \ref matrices // <ul> // <li> \ref matrix_types // <ul> // <li> \ref matrix_types_dense_matrices </li> // <li> \ref matrix_types_sparse_matrices </li> // </ul> // </li> // <li> \ref matrix_operations // <ul> // <li> \ref matrix_operations_constructors </li> // <li> \ref matrix_operations_assignment </li> // <li> \ref matrix_operations_element_access </li> // <li> \ref matrix_operations_element_insertion </li> // <li> \ref matrix_operations_element_removal </li> // <li> \ref matrix_operations_element_lookup </li> // <li> \ref matrix_operations_non_modifying_operations </li> // <li> \ref matrix_operations_modifying_operations </li> // <li> \ref matrix_operations_arithmetic_operations </li> // <li> \ref matrix_operations_reduction_operations </li> // <li> \ref matrix_operations_norms </li> // <li> \ref matrix_operations_scalar_expansion </li> // <li> \ref matrix_operations_matrix_repetition </li> // <li> \ref matrix_operations_statistic_operations </li> // <li> \ref matrix_operations_declaration_operations </li> // <li> \ref matrix_operations_matrix_generators </li> // <li> \ref matrix_operations_matrix_inversion </li> // <li> \ref matrix_operations_matrix_exponential </li> // <li> \ref matrix_operations_decomposition </li> // <li> \ref matrix_operations_linear_systems </li> // <li> \ref matrix_operations_eigenvalues </li> // <li> \ref matrix_operations_singularvalues </li> // </ul> // </li> // </ul> // </li> // <li> \ref adaptors // <ul> // <li> \ref adaptors_symmetric_matrices </li> // <li> \ref adaptors_hermitian_matrices </li> // <li> \ref adaptors_triangular_matrices </li> // </ul> // 
</li> // <li> \ref views // <ul> // <li> \ref views_subvectors </li> // <li> \ref views_element_selections </li> // <li> \ref views_submatrices </li> // <li> \ref views_rows </li> // <li> \ref views_row_selections </li> // <li> \ref views_columns </li> // <li> \ref views_column_selections </li> // <li> \ref views_bands </li> // </ul> // </li> // <li> \ref arithmetic_operations // <ul> // <li> \ref addition </li> // <li> \ref subtraction </li> // <li> \ref scalar_multiplication </li> // <li> \ref vector_vector_multiplication // <ul> // <li> \ref componentwise_multiplication </li> // <li> \ref inner_product </li> // <li> \ref outer_product </li> // <li> \ref cross_product </li> // <li> \ref vector_kronecker_product </li> // </ul> // </li> // <li> \ref vector_vector_division </li> // <li> \ref matrix_vector_multiplication </li> // <li> \ref matrix_matrix_multiplication // <ul> // <li> \ref schur_product </li> // <li> \ref matrix_product </li> // <li> \ref matrix_kronecker_product </li> // </ul> // </li> // </ul> // </li> // <li> \ref bitwise_operations // <ul> // <li> \ref bitwise_shift </li> // <li> \ref bitwise_and </li> // <li> \ref bitwise_or </li> // <li> \ref bitwise_xor </li> // </ul> // </li> // <li> \ref logical_operations // <ul> // <li> \ref logical_not </li> // <li> \ref logical_and </li> // <li> \ref logical_or </li> // </ul> // </li> // <li> \ref shared_memory_parallelization // <ul> // <li> \ref hpx_parallelization </li> // <li> \ref cpp_threads_parallelization </li> // <li> \ref boost_threads_parallelization </li> // <li> \ref openmp_parallelization </li> // <li> \ref serial_execution </li> // </ul> // </li> // <li> \ref serialization // <ul> // <li> \ref vector_serialization </li> // <li> \ref matrix_serialization </li> // </ul> // </li> // <li> \ref customization // <ul> // <li> \ref configuration_files </li> // <li> \ref vector_and_matrix_customization // <ul> // <li> \ref custom_data_members </li> // <li> \ref custom_operations </li> // <li> \ref 
custom_data_types </li> // </ul> // </li> // <li> \ref grouping_tagging </li> // <li> \ref error_reporting_customization </li> // </ul> // </li> // <li> \ref blas_functions </li> // <li> \ref lapack_functions </li> // <li> \ref block_vectors_and_matrices </li> // <li> \ref intra_statement_optimization </li> // <li> \ref faq </li> // <li> \ref issue_creation_guidelines </li> // <li> \ref blaze_references </li> // </ul> */ //************************************************************************************************* //**Configuration and Installation***************************************************************** /*!\page configuration_and_installation Configuration and Installation // // \tableofcontents // // // Since \b Blaze is a header-only library, setting up the \b Blaze library on a particular system // is a fairly easy two step process. In the following, this two step process is explained in // detail, preceded only by a short summary of the requirements. // // // \n \section requirements Requirements // <hr> // // For maximum performance the \b Blaze library expects you to have a BLAS library installed // (<a href="http://software.intel.com/en-us/articles/intel-mkl/">Intel MKL</a>, // <a href="http://developer.amd.com/libraries/acml/">ACML</a>, // <a href="http://math-atlas.sourceforge.net">Atlas</a>, // <a href="http://www.tacc.utexas.edu/tacc-projects/gotoblas2">Goto</a>, ...). If you don't // have a BLAS library installed on your system, \b Blaze will still work and will not be reduced // in functionality, but performance may be limited. Thus it is strongly recommended to install a // BLAS library. // // Additionally, for computing the determinant of a dense matrix, for the decomposition of dense // matrices, for the dense matrix inversion, and for the computation of eigenvalues and singular // values \b Blaze requires <a href="https://en.wikipedia.org/wiki/LAPACK">LAPACK</a>. 
When either // of these features is used it is necessary to link the LAPACK library to the final executable. // If no LAPACK library is available the use of these features will result in a linker error. // // Furthermore, it is possible to use Boost threads to run numeric operations in parallel. In this // case the Boost library is required to be installed on your system. It is recommended to use the // newest Boost library available, but \b Blaze requires at minimum the Boost version 1.54.0. If // you don't have Boost installed on your system, you can download it for free from // <a href="http://www.boost.org">www.boost.org</a>. // // // \n \section step_1_installation Step 1: Installation // <hr> // // \subsection step_1_cmake Installation via CMake // // The first step is the installation of the \b Blaze header files. The most convenient way // to do this is via <a href="https://cmake.org">CMake</a>. Linux and macOS users can use the // following two lines to copy the \b Blaze headers in the <tt>./blaze</tt> subdirectory to // the directory \c ${CMAKE_INSTALL_PREFIX}/include and the package configuration files to // \c ${CMAKE_INSTALL_PREFIX}/share/blaze/cmake. \code cmake -DCMAKE_INSTALL_PREFIX=/usr/local/ sudo make install \endcode // Windows users can do the same via the cmake-gui. Alternatively, it is possible to include // \b Blaze by adding the following lines in any \c CMakeLists.txt file: \code find_package( blaze ) if( blaze_FOUND ) add_library( blaze_target INTERFACE ) target_link_libraries( blaze_target INTERFACE blaze::blaze ) endif() \endcode // Alternatively \b Blaze provides the <tt>./cmake/Blaze_Import</tt> CMake function to import // the \b Blaze library into CMake based projects. This approach includes the configuration // step (see \ref step_2_configuration). 
To do so you need to import the function file like // any other module/function into your CMake project: \code list(APPEND CMAKE_MODULE_PATH ${BLAZE_LIBRARY_PATH}/cmake) include(Blaze_Import) \endcode // After importing the function script you can import and use the \b Blaze library: \code Blaze_Import(ARGUMENTS) target_link_libraries(TARGET Blaze) \endcode // In this example, \c TARGET is the executable/library using \b Blaze and \c ARGUMENTS is the // configuration you want for building \b Blaze. To configure \b Blaze using the import function // you can set the input arguments like this example: \code Blaze_Import( QUIET BLAS on LAPACK on THREADING Boost CACHE_SIZE auto VECTORIZATION on STORAGE_ORDER rowMajor THRESHOLD_DMATDVECMULT 100000UL THRESHOLD_SMP_DVECDVECADD 1000000UL ) \endcode // For more details about available configuration options please have a look at // \ref configuration_files and the <tt>Blaze_Import.cmake</tt> function script. // // \n \subsection step_1_vcpkg Installation via the VC++ Packaging Tool // // An alternate way to install \b Blaze for Windows users is Microsoft's // <a href="https://github.com/Microsoft/vcpkg">VC++ Packaging Tool (vcpkg)</a>. \b Blaze can // be installed via the command line: \code C:\src\vcpkg> .\vcpkg install blaze \endcode // The tool automatically downloads the latest \b Blaze release and copies the header files to // the common include directory. Please note that since \b Blaze is a header-only library the // attempt to install any static or dynamic library will fail! 
// // \n \subsection step_1_installation_unix Manual Installation on Linux/macOS // // Since \b Blaze only consists of header files, the <tt>./blaze</tt> subdirectory can be simply // copied to a standard include directory (note that this requires root privileges): \code cp -r ./blaze /usr/local/include \endcode // Alternatively, on Unix-based machines (which includes Linux and Mac OS X) the // \c CPLUS_INCLUDE_PATH environment variable can be set. The specified directory will be // searched after any directories specified on the command line with the option \c -I and // before the standard default directories (such as \c /usr/local/include and \c /usr/include). // Assuming a user named 'Jon', the environment variable can be set as follows: \code CPLUS_INCLUDE_PATH=/usr/home/jon/blaze export CPLUS_INCLUDE_PATH \endcode // Last but not least, the <tt>./blaze</tt> subdirectory can be explicitly specified on the // command line. The following example demonstrates this by means of the GNU C++ compiler: \code g++ -I/usr/home/jon/blaze -o BlazeTest BlazeTest.cpp \endcode // \n \subsection step_1_installation_windows Manual Installation on Windows // // Windows doesn't have a standard include directory. Therefore the \b Blaze header files can be // copied to any other directory or simply left in the default \b Blaze directory. However, the // chosen include directory has to be explicitly specified as include path. In Visual Studio, // this is done via the project property pages, configuration properties, C/C++, General settings. // Here the additional include directories can be specified. // // // \n \section step_2_configuration Step 2: Configuration // <hr> // // The second step is the configuration and customization of the \b Blaze library. Many aspects // of \b Blaze can be adapted to specific requirements, environments and architectures. 
The most // convenient way to configure \b Blaze is to modify the headers in the <tt>./blaze/config/</tt> // subdirectory by means of <a href="https://cmake.org">CMake</a>. Alternatively these header // files can be customized manually. In both cases, however, the files are modified. If this is // not an option it is possible to configure \b Blaze via the command line (see the tutorial // section \ref configuration_files or the documentation in the configuration files). // // Since the default settings are reasonable for most systems this step can also be skipped. // However, in order to achieve maximum performance a customization of at least the following // configuration files is required: // // - <b><tt><blaze/config/BLAS.h></tt></b>: Via this configuration file \b Blaze can be enabled // to use a third-party BLAS library for several basic linear algebra functions (such as for // instance dense matrix multiplications). In case no BLAS library is used, all linear algebra // functions use the default implementations of the \b Blaze library and therefore BLAS is not a // requirement for the compilation process. However, please note that performance may be limited. // - <b><tt><blaze/config/CacheSize.h></tt></b>: This file contains the hardware specific cache // settings. \b Blaze uses this information to optimize its cache usage. For maximum performance // it is recommended to adapt these setting to a specific target architecture. // - <b><tt><blaze/config/Thresholds.h></tt></b>: This file contains all thresholds for the // customization of the \b Blaze compute kernels. In order to tune the kernels for a specific // architecture and to maximize performance it can be necessary to adjust the thresholds, // especially for a parallel execution (see \ref shared_memory_parallelization). // // For an overview of other customization options and more details, please see the section // \ref configuration_files. 
// // // \n \section blaze_version Blaze Version // <hr> // // The current major and minor version number of the \b Blaze library can be found in the // <b><tt><blaze/system/Version.h></tt></b> header file. It is automatically included via the // <b><tt><blaze/Blaze.h></tt></b> header file. The file contains the two following macros, // which can for instance be used for conditional compilation: \code #define BLAZE_MAJOR_VERSION 3 #define BLAZE_MINOR_VERSION 9 #define BLAZE_PATCH_VERSION 0 \endcode // \n Next: \ref getting_started */ //************************************************************************************************* //**Getting Started******************************************************************************** /*!\page getting_started Getting Started // // This short tutorial serves the purpose to give a quick overview of the way mathematical // expressions have to be formulated in \b Blaze. Starting with \ref vector_types, the following // long tutorial covers the most important aspects of the \b Blaze math library. // // // \n \section getting_started_vector_example A First Example // // \b Blaze is written such that using mathematical expressions is as close to mathematical // textbooks as possible and therefore as intuitive as possible. In nearly all cases the seemingly // easiest solution is the right solution and most users experience no problems when trying to // use \b Blaze in the most natural way. The following example gives a first impression of the // formulation of a vector addition in \b Blaze: \code #include <iostream> #include <blaze/Math.h> using blaze::StaticVector; using blaze::DynamicVector; int main() { // Instantiation of a static 3D column vector. The vector is directly initialized as // ( 4 -2 5 ) StaticVector<int,3UL> a{ 4, -2, 5 }; // Instantiation of a dynamic 3D column vector. 
Via the subscript operator the values are set to // ( 2 5 -3 ) DynamicVector<int> b( 3UL ); b[0] = 2; b[1] = 5; b[2] = -3; // Adding the vectors a and b DynamicVector<int> c = a + b; // Printing the result of the vector addition std::cout << "c =\n" << c << "\n"; } \endcode // Note that the entire \b Blaze math library can be included via the \c blaze/Math.h header // file. Alternatively, the entire \b Blaze library, including both the math and the entire // utility module, can be included via the \c blaze/Blaze.h header file. Also note that all // classes and functions of \b Blaze are contained in the blaze namespace.\n\n // // Assuming that this program resides in a source file called \c FirstExample.cpp, it can be // compiled for instance via the GNU C++ compiler: \code g++ -std=c++14 -O3 -DNDEBUG -mavx -o FirstExample FirstExample.cpp \endcode // Note the definition of the \c NDEBUG preprocessor symbol. In order to achieve maximum // performance, it is necessary to compile the program in release mode, which deactivates // all debugging functionality inside \b Blaze. It is also strongly recommended to specify // the available architecture specific instruction set (as for instance the AVX instruction // set, which if available can be activated via the \c -mavx flag). This allows \b Blaze // to optimize computations via vectorization.\n\n // // When running the resulting executable \c FirstExample, the output of the last line of // this small program is \code c = ( 6 ) ( 3 ) ( 2 ) \endcode // \n \section getting_started_matrix_example An Example Involving Matrices // // Similarly easy and intuitive are expressions involving matrices: \code #include <iostream> #include <blaze/Math.h> using namespace blaze; int main() { // Instantiating a dynamic 3D column vector DynamicVector<int> x{ 4, -1, 3 }; // Instantiating a dynamic 2x3 row-major matrix, preinitialized with 0. 
Via the function call // operator three values of the matrix are explicitly set to get the matrix // ( 1 0 4 ) // ( 0 -2 0 ) DynamicMatrix<int> A( 2UL, 3UL, 0 ); A(0,0) = 1; A(0,2) = 4; A(1,1) = -2; // Performing a matrix/vector multiplication DynamicVector<int> y = A * x; // Printing the resulting vector std::cout << "y =\n" << y << "\n"; // Instantiating a static column-major matrix. The matrix is directly initialized as // ( 3 -1 ) // ( 0 2 ) // ( -1 0 ) StaticMatrix<int,3UL,2UL,columnMajor> B{ { 3, -1 }, { 0, 2 }, { -1, 0 } }; // Performing a matrix/matrix multiplication DynamicMatrix<int> C = A * B; // Printing the resulting matrix std::cout << "C =\n" << C << "\n"; } \endcode // The output of this program is \code y = ( 16 ) ( 2 ) C = ( -1 -1 ) ( 0 -4 ) \endcode // \n \section getting_started_complex_example A Complex Example // // The following example is much more sophisticated. It shows the implementation of the Conjugate // Gradient (CG) algorithm (http://en.wikipedia.org/wiki/Conjugate_gradient) by means of the // \b Blaze library: // // \image html cg.jpg // // In this example it is not important to understand the CG algorithm itself, but to see the // advantage of the API of the \b Blaze library. In the \b Blaze implementation we will use a // sparse matrix/dense vector multiplication for a 2D Poisson equation using \f$ N \times N \f$ // unknowns. It becomes apparent that the core of the algorithm is very close to the mathematical // formulation and therefore has huge advantages in terms of readability and maintainability, // while the performance of the code is close to the expected theoretical peak performance: \code #include <blaze/Math.h> int main() { const size_t N ( 1000UL ); const size_t iterations( 10UL ); const size_t NN( N*N ); blaze::CompressedMatrix<double,rowMajor> A( NN, NN ); blaze::DynamicVector<double,columnVector> x( NN, 1.0 ), b( NN, 0.0 ), r( NN ), p( NN ), Ap( NN ); double alpha, beta, delta; // ... 
Initializing the sparse matrix A // Performing the CG algorithm r = b - A * x; p = r; delta = (r,r); for( size_t iteration=0UL; iteration<iterations; ++iteration ) { Ap = A * p; alpha = delta / (p,Ap); x += alpha * p; r -= alpha * Ap; beta = (r,r); if( std::sqrt( beta ) < 1E-8 ) break; p = r + ( beta / delta ) * p; delta = beta; } } \endcode // \n Hopefully this short tutorial gives a good first impression of how mathematical expressions // are formulated with \b Blaze. The following long tutorial, starting with \ref vector_types, // will cover all aspects of the \b Blaze math library, i.e. it will introduce all vector and // matrix types, all possible operations on vectors and matrices, and of course all possible // mathematical expressions. // // \n Previous: \ref configuration_and_installation &nbsp; &nbsp; Next: \ref vectors */ //************************************************************************************************* //**Vectors**************************************************************************************** /*!\page vectors Vectors // // \tableofcontents // // // \n \section vectors_general General Concepts // <hr> // // The \b Blaze library currently offers five dense vector types (\ref vector_types_static_vector, // \ref vector_types_dynamic_vector, \ref vector_types_hybrid_vector, \ref vector_types_custom_vector, // and \ref vector_types_uniform_vector) and two sparse vector types (\ref vector_types_compressed_vector // and \ref vector_types_zero_vector). 
All vectors can be specified as either column vectors or row // vectors: \code using blaze::DynamicVector; using blaze::columnVector; using blaze::rowVector; // Setup of the 3-dimensional dense column vector // // ( 1 ) // ( 2 ) // ( 3 ) // DynamicVector<int,columnVector> a{ 1, 2, 3 }; // Setup of the 3-dimensional dense row vector // // ( 4 5 6 ) // DynamicVector<int,rowVector> b{ 4, 5, 6 }; \endcode // Per default, all vectors in \b Blaze are column vectors: \code // Instantiation of a 3-dimensional column vector blaze::DynamicVector<int> c( 3UL ); \endcode // \n \section vectors_details Vector Details // <hr> // // - \ref vector_types // - \ref vector_operations // // // \n \section vectors_examples Examples // <hr> \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowVector; using blaze::columnVector; StaticVector<int,6UL> a; // Instantiation of a 6-dimensional static column vector CompressedVector<int,rowVector> b; // Instantiation of a compressed row vector DynamicVector<int,columnVector> c; // Instantiation of a dynamic column vector // ... Resizing and initialization c = a + trans( b ); \endcode // \n Previous: \ref getting_started &nbsp; &nbsp; Next: \ref vector_types */ //************************************************************************************************* //**Vector Types*********************************************************************************** /*!\page vector_types Vector Types // // \tableofcontents // // // \n \section vector_types_dense_vectors Dense Vectors // <hr> // // \subsection vector_types_static_vector StaticVector // // The blaze::StaticVector class template is the representation of a fixed size vector with // statically allocated elements of arbitrary type. 
It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/StaticVector.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the elements, the number of elements, the transpose flag, the alignment, the // padding, and the group tag of the vector can be specified via the six template parameters: \code namespace blaze { template< typename Type, size_t N, bool TF, AlignmentFlag AF, PaddingFlag PF, typename Tag > class StaticVector; } // namespace blaze \endcode // - \c Type: specifies the type of the vector elements. StaticVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c N : specifies the total number of vector elements. It is expected that StaticVector is // only used for tiny and small vectors. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::defaultTransposeFlag. // - \c AF : specifies whether the first element of the vector is properly aligned with // respect to the available instruction set (SSE, AVX, ...). Possible values are // \c blaze::aligned and \c blaze::unaligned. The default value is // \c blaze::defaultAlignmentFlag. // - \c PF : specifies whether the vector should be padded to maximize the efficiency of // vectorized operations. Possible values are \c blaze::padded and \c blaze::unpadded. // The default value is \c blaze::defaultPaddingFlag. // - \c Tag : optional type parameter to tag the vector. The default type is \c blaze::Group0. // See \ref grouping_tagging for details. 
// // The blaze::StaticVector is perfectly suited for small to medium vectors whose size is known at // compile time: \code // Definition of a 3-dimensional integral column vector blaze::StaticVector<int,3UL> a; // Definition of a 4-dimensional single precision column vector blaze::StaticVector<float,4UL,blaze::columnVector> b; // Definition of an unaligned, unpadded 6-dimensional double precision row vector blaze::StaticVector<double,6UL,blaze::rowVector,blaze::unaligned,blaze::unpadded> c; \endcode // \subsubsection vector_types_static_vector_alignment Alignment // // In case \c AF is set to \c blaze::aligned, the elements of a blaze::StaticVector are possibly // over-aligned to meet the alignment requirements of the available instruction set (SSE, AVX, // AVX-512, ...). The alignment for fundamental types (\c short, \c int, \c float, \c double, ...) // and complex types (\c complex<float>, \c complex<double>, ...) is 16 bytes for SSE, 32 bytes // for AVX, and 64 bytes for AVX-512. All other types are aligned according to their intrinsic // alignment: \code struct Int { int i; }; using VT1 = blaze::StaticVector<double,3UL>; using VT2 = blaze::StaticVector<complex<float>,2UL>; using VT3 = blaze::StaticVector<Int,5UL>; alignof( VT1 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512 alignof( VT2 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512 alignof( VT3 ); // Evaluates to 'alignof( Int )' \endcode // Note that an aligned blaze::StaticVector instance may be bigger than the sum of its data // elements: \code sizeof( VT1 ); // Evaluates to 32 for both SSE and AVX sizeof( VT2 ); // Evaluates to 16 for SSE and 32 for AVX sizeof( VT3 ); // Evaluates to 20; no special alignment requirements \endcode // Please note that for this reason an aligned blaze::StaticVector cannot be used in containers // using dynamic memory such as \c std::vector without additionally providing an allocator that // can provide over-aligned memory: \code using Type = 
blaze::StaticVector<double,3UL>; using Allocator = blaze::AlignedAllocator<Type>; std::vector<Type> v1; // Might be misaligned for AVX or AVX-512 std::vector<Type,Allocator> v2; // Properly aligned for AVX or AVX-512 \endcode // \subsubsection vector_types_static_vector_padding Padding // // Adding padding elements to the end of a blaze::StaticVector can have a significant impact on // the performance. For instance, assuming that AVX is available, then two padded 3-dimensional // vectors of double precision values can be added via a single SIMD addition operation: \code using blaze::StaticVector; using blaze::columnVector; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; StaticVector<double,3UL,columnVector,aligned,padded> a1, b1, c1; StaticVector<double,3UL,columnVector,unaligned,unpadded> a2, b2, c2; // ... Initialization c1 = a1 + b1; // AVX-based vector addition; maximum performance c2 = a2 + b2; // Scalar vector addition; limited performance sizeof( a1 ); // Evaluates to 32 for SSE and AVX, and 64 for AVX-512 sizeof( a2 ); // Evaluates to 24 for SSE, AVX, and AVX-512 (minimum size) \endcode // Due to padding, the first addition will run at maximum performance. On the flip side, the size // of each vector instance is increased due to the padding elements. The total size of an instance // depends on the number of elements and width of the available instruction set (16 bytes for // SSE, 32 bytes for AVX, and 64 bytes for AVX-512). // // The second addition will be limited in performance since due to the number of elements some of // the elements need to be handled in a scalar operation. However, the size of an \c unaligned, // \c unpadded blaze::StaticVector instance is guaranteed to be the sum of its elements. // // Please also note that \b Blaze will zero initialize the padding elements in order to achieve // maximum performance! 
// // // \n \subsection vector_types_dynamic_vector DynamicVector // // The blaze::DynamicVector class template is the representation of an arbitrary sized vector // with dynamically allocated elements of arbitrary type. It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/DynamicVector.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the elements, the transpose flag, the type of the allocator, and the group tag of // the vector can be specified via the four template parameters: \code namespace blaze { template< typename Type, bool TF, typename Alloc, typename Tag > class DynamicVector; } // namespace blaze \endcode // - \c Type : specifies the type of the vector elements. DynamicVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::defaultTransposeFlag. // - \c Alloc: specifies the type of allocator used to allocate dynamic memory. The default type // of allocator is \c blaze::AlignedAllocator. // - \c Tag : optional type parameter to tag the vector. The default type is \c blaze::Group0. // See \ref grouping_tagging for details. // // The blaze::DynamicVector is the default choice for all kinds of dense vectors and the best // choice for medium to large vectors. 
Its size can be modified at runtime: \code // Definition of a 3-dimensional integral column vector blaze::DynamicVector<int> a( 3UL ); // Definition of a 4-dimensional single precision column vector blaze::DynamicVector<float,blaze::columnVector> b( 4UL ); // Definition of a double precision row vector with size 0 blaze::DynamicVector<double,blaze::rowVector> c; \endcode // \subsubsection vector_types_dynamic_vector_allocators Allocators // // Via the third template parameter it is possible to customize the memory allocation of a // \c blaze::DynamicVector. The provided allocator is expected to represent an implementation of // the allocator concept of the standard library (see for instance // <a href="https://en.cppreference.com/w/cpp/container/vector">std::vector</a> and // <a href="https://en.cppreference.com/w/cpp/memory/allocator">std::allocator</a>). In // addition, the provided allocator is also required to provide properly (over-)aligned memory // for fundamental and complex numbers. For instance, in case SSE vectorization is possible, the // returned memory must be at least 16-byte aligned. In case AVX is active, the memory must be at // least 32-byte aligned, and in case of AVX-512 the memory must be even 64-byte aligned. // // // \n \subsection vector_types_hybrid_vector HybridVector // // The blaze::HybridVector class template combines the advantages of the blaze::StaticVector and // the blaze::DynamicVector class templates. It represents a fixed size vector with statically // allocated elements, but still can be dynamically resized (within the bounds of the available // memory). 
It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/HybridVector.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the elements, the maximum number of elements, the transpose flag, the alignment, // the padding, and the group tag of the vector can be specified via the six template parameters: \code namespace blaze { template< typename Type, size_t N, bool TF, AlignmentFlag AF, PaddingFlag PF, typename Tag > class HybridVector; } // namespace blaze \endcode // - \c Type: specifies the type of the vector elements. HybridVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c N : specifies the maximum number of vector elements. It is expected that HybridVector // is only used for tiny and small vectors. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::defaultTransposeFlag. // - \c AF : specifies whether the first element of the vector is properly aligned with // respect to the available instruction set (SSE, AVX, ...). Possible values are // \c blaze::aligned and \c blaze::unaligned. The default value is // \c blaze::defaultAlignmentFlag. // - \c PF : specifies whether the vector should be padded to maximize the efficiency of // vectorized operations. Possible values are \c blaze::padded and \c blaze::unpadded. // The default value is \c blaze::defaultPaddingFlag. // - \c Tag : optional type parameter to tag the vector. The default type is \c blaze::Group0. // See \ref grouping_tagging for details. 
// // The blaze::HybridVector is a suitable choice for small to medium vectors, whose size is not // known at compile time or not fixed at runtime, but whose maximum size is known at compile // time: \code // Definition of a 3-dimensional integral column vector with a maximum size of 6 blaze::HybridVector<int,6UL> a( 3UL ); // Definition of a 4-dimensional single precision column vector with a maximum size of 16 blaze::HybridVector<float,16UL,blaze::columnVector> b( 4UL ); // Definition of an unaligned, unpadded double precision row vector with size 0 and a maximum size of 6 blaze::HybridVector<double,6UL,blaze::rowVector,blaze::unaligned,blaze::unpadded> c; \endcode // \subsubsection vector_types_hybrid_vector_alignment Alignment // // In case \c AF is set to \c blaze::aligned, the elements of a blaze::HybridVector are possibly // over-aligned to meet the alignment requirements of the available instruction set (SSE, AVX, // AVX-512, ...). The alignment for fundamental types (\c short, \c int, \c float, \c double, ...) // and complex types (\c complex<float>, \c complex<double>, ...) is 16 bytes for SSE, 32 bytes // for AVX, and 64 bytes for AVX-512. 
All other types are aligned according to their intrinsic // alignment: \code struct Int { int i; }; using VT1 = blaze::HybridVector<double,3UL>; using VT2 = blaze::HybridVector<complex<float>,2UL>; using VT3 = blaze::HybridVector<Int,5UL>; alignof( VT1 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512 alignof( VT2 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512 alignof( VT3 ); // Evaluates to 'alignof( Int )' \endcode // Note that an aligned blaze::HybridVector instance may be bigger than an according unaligned // blaze::HybridVector: \code sizeof( VT1 ); // Evaluates to 32 for both SSE and AVX sizeof( VT2 ); // Evaluates to 16 for SSE and 32 for AVX sizeof( VT3 ); // Evaluates to 20; no special alignment requirements \endcode // Please note that for this reason an aligned blaze::HybridVector cannot be used in containers // using dynamic memory such as \c std::vector without additionally providing an allocator that // can provide over-aligned memory: \code using Type = blaze::HybridVector<double,3UL>; using Allocator = blaze::AlignedAllocator<Type>; std::vector<Type> v1; // Might be misaligned for AVX or AVX-512 std::vector<Type,Allocator> v2; // Properly aligned for AVX or AVX-512 \endcode // \subsubsection vector_types_hybrid_vector_padding Padding // // Adding padding elements to the end of a blaze::HybridVector can have a significant impact on // the performance. For instance, assuming that AVX is available, then two padded 3-dimensional // vectors of double precision values can be added via a single SIMD addition operation: \code using blaze::HybridVector; using blaze::columnVector; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; HybridVector<double,3UL,columnVector,aligned,padded> a1, b1, c1; HybridVector<double,3UL,columnVector,unaligned,unpadded> a2, b2, c2; // ... 
Resizing and initialization c1 = a1 + b1; // AVX-based vector addition; maximum performance c2 = a2 + b2; // Scalar vector addition; limited performance sizeof( a1 ); // Evaluates to 48 for SSE, 64 for AVX, and 128 for AVX-512 sizeof( a2 ); // Evaluates to 32 for SSE, AVX, and AVX-512 (minimum size) \endcode // Due to padding, the first addition will run at maximum performance. On the flip side, the size // of each vector instance is increased due to the padding elements. The total size of an instance // depends on the number of elements and width of the available instruction set (16 bytes for // SSE, 32 bytes for AVX, and 64 bytes for AVX-512). // // The second addition will be limited in performance since due to the number of elements some of // the elements need to be handled in a scalar operation. However, the size of an \c unaligned, // \c unpadded blaze::HybridVector instance is guaranteed to be the sum of its elements plus the // necessary data members to store the current size. // // Please also note that \b Blaze will zero initialize the padding elements in order to achieve // maximum performance! // // // \n \subsection vector_types_custom_vector CustomVector // // The blaze::CustomVector class template provides the functionality to represent an external // array of elements of arbitrary type and a fixed size as a native \b Blaze dense vector data // structure. Thus in contrast to all other dense vector types a custom vector does not perform // any kind of memory allocation by itself, but it is provided with an existing array of elements // during construction. A custom vector can therefore be considered an alias to the existing // array. 
It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/CustomVector.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the elements, the properties of the given array of elements, the transpose flag, // and the group tag of the vector can be specified via the following five template parameters: \code namespace blaze { template< typename Type, AlignmentFlag AF, PaddingFlag PF, bool TF, typename Tag > class CustomVector; } // namespace blaze \endcode // - \c Type: specifies the type of the vector elements. blaze::CustomVector can be used with // any possibly cv-qualified, non-reference, non-pointer element type. // - \c AF : specifies whether the represented, external arrays are properly aligned with // respect to the available instruction set (SSE, AVX, ...) or not (\c blaze::aligned // or \c blaze::unaligned). // - \c PF : specifies whether the represented, external arrays are properly padded with // respect to the available instruction set (SSE, AVX, ...) or not (\c blaze::padded // or \c blaze::unpadded). // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::defaultTransposeFlag. // - \c Tag : optional type parameter to tag the vector. The default type is \c blaze::Group0. // See \ref grouping_tagging for details. 
// // The blaze::CustomVector is the right choice if any external array needs to be represented as // a \b Blaze dense vector data structure or if a custom memory allocation strategy needs to be // realized: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of an unmanaged custom column vector for unaligned, unpadded integer arrays using UnalignedUnpadded = CustomVector<int,unaligned,unpadded,columnVector>; std::vector<int> vec( 7UL ); UnalignedUnpadded a( &vec[0], 7UL ); // Definition of a managed custom column vector for unaligned but padded 'float' arrays using UnalignedPadded = CustomVector<float,unaligned,padded,columnVector>; std::unique_ptr<float[]> memory1( new float[16] ); UnalignedPadded b( memory1.get(), 9UL, 16UL ); // Definition of a managed custom row vector for aligned, unpadded 'double' arrays using AlignedUnpadded = CustomVector<double,aligned,unpadded,rowVector>; std::unique_ptr<double[],Deallocate> memory2( blaze::allocate<double>( 7UL ) ); AlignedUnpadded c( memory2.get(), 7UL ); // Definition of a managed custom column vector for aligned, padded 'complex<double>' arrays using cplx = complex<double>; using AlignedPadded = CustomVector<cplx,aligned,padded,columnVector>; std::unique_ptr<cplx[],Deallocate> memory3( allocate<cplx>( 8UL ) ); AlignedPadded d( memory3.get(), 5UL, 8UL ); \endcode // In comparison with the remaining \b Blaze dense vector types blaze::CustomVector has several // special characteristics. All of these result from the fact that a custom vector is not // performing any kind of memory allocation, but instead is given an existing array of elements. 
// The following sections discuss all of these characteristics: // // -# <b>\ref vector_types_custom_vector_memory_management</b> // -# <b>\ref vector_types_custom_vector_copy_operations</b> // -# <b>\ref vector_types_custom_vector_alignment</b> // -# <b>\ref vector_types_custom_vector_padding</b> // // \subsubsection vector_types_custom_vector_memory_management Memory Management // // The blaze::CustomVector class template acts as an adaptor for an existing array of elements. As // such it provides everything that is required to use the array just like a native \b Blaze dense // vector data structure. However, this flexibility comes with the price that the user of a custom // vector is responsible for the resource management. // // The following examples give an impression of several possible types of custom vectors: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of a 3-dimensional custom vector with unaligned, unpadded and externally // managed integer array. Note that the std::vector must be guaranteed to outlive the // custom vector! std::vector<int> vec( 3UL ); CustomVector<int,unaligned,unpadded> a( &vec[0], 3UL ); // Definition of a custom vector with size 3 and capacity 16 with aligned, padded and // externally managed integer array. Note that the std::unique_ptr must be guaranteed // to outlive the custom vector! 
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 16UL ) ); CustomVector<int,aligned,padded> b( memory.get(), 3UL, 16UL ); \endcode // \subsubsection vector_types_custom_vector_copy_operations Copy Operations // // As with all dense vectors it is possible to copy construct a custom vector: \code using blaze::CustomVector; using blaze::unaligned; using blaze::unpadded; using CustomType = CustomVector<int,unaligned,unpadded>; std::vector<int> vec( 5UL, 10 ); // Vector of 5 integers of the value 10 CustomType a( &vec[0], 5UL ); // Represent the std::vector as Blaze dense vector a[1] = 20; // Also modifies the std::vector CustomType b( a ); // Creating a copy of vector a b[2] = 20; // Also affects vector a and the std::vector \endcode // It is important to note that a custom vector acts as a reference to the specified array. Thus // the result of the copy constructor is a new custom vector that is referencing and representing // the same array as the original custom vector. // // In contrast to copy construction, just as with references, copy assignment does not change // which array is referenced by the custom vector, but modifies the values of the array: \code std::vector<int> vec2( 5UL, 4 ); // Vector of 5 integers of the value 4 CustomType c( &vec2[0], 5UL ); // Represent the std::vector as Blaze dense vector a = c; // Copy assignment: Set all values of vector a and b to 4. \endcode // \subsubsection vector_types_custom_vector_alignment Alignment // // In case the custom vector is specified as \c aligned the passed array must be guaranteed to // be aligned according to the requirements of the used instruction set (SSE, AVX, ...). 
For // instance, if AVX is active an array of integers must be 32-byte aligned: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; // Allocation of 32-byte aligned memory std::unique_ptr<int[],Deallocate> memory( allocate<int>( 5UL ) ); CustomVector<int,aligned,unpadded> a( memory.get(), 5UL ); \endcode // In case the alignment requirements are violated, a \c std::invalid_argument exception is // thrown. // // \subsubsection vector_types_custom_vector_padding Padding // // Adding padding elements to the end of an array can have a significant impact on the performance. // For instance, assuming that AVX is available, then two aligned, padded, 3-dimensional vectors // of double precision values can be added via a single SIMD addition operation: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; using CustomType = CustomVector<double,aligned,padded>; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 4UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 4UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 4UL ) ); // Creating padded custom vectors of size 3 and a capacity of 4 CustomType a( memory1.get(), 3UL, 4UL ); CustomType b( memory2.get(), 3UL, 4UL ); CustomType c( memory3.get(), 3UL, 4UL ); // ... Initialization c = a + b; // AVX-based vector addition \endcode // In this example, maximum performance is possible. 
However, in case no padding elements are // inserted, a scalar addition has to be used: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; using CustomType = CustomVector<double,aligned,unpadded>; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 3UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 3UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 3UL ) ); // Creating unpadded custom vectors of size 3 CustomType a( memory1.get(), 3UL ); CustomType b( memory2.get(), 3UL ); CustomType c( memory3.get(), 3UL ); // ... Initialization c = a + b; // Scalar vector addition \endcode // Note the different number of constructor parameters for unpadded and padded custom vectors: // In contrast to unpadded vectors, where during the construction only the size of the array // has to be specified, during the construction of a padded custom vector it is additionally // necessary to explicitly specify the capacity of the array. // // The number of padding elements is required to be sufficient with respect to the available // instruction set: In case of an aligned padded custom vector the added padding elements must // guarantee that the capacity is greater or equal than the size and a multiple of the SIMD vector // width. In case of unaligned padded vectors the number of padding elements can be greater or // equal the number of padding elements of an aligned padded custom vector. In case the padding // is insufficient with respect to the available instruction set, a \c std::invalid_argument // exception is thrown. // // Please also note that \b Blaze will zero initialize the padding elements in order to achieve // maximum performance! 
// // // \n \subsection vector_types_uniform_vector UniformVector // // The blaze::UniformVector class template is the representation of an arbitrary sized uniform // vector with elements of arbitrary type. It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/UniformVector.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the elements, the transpose flag, and the group tag of the vector can be specified // via the three template parameters: \code namespace blaze { template< typename Type, bool TF, typename Tag > class UniformVector; } // namespace blaze \endcode // - \c Type: specifies the type of the vector elements. UniformVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::defaultTransposeFlag. // - \c Tag : optional type parameter to tag the vector. The default type is \c blaze::Group0. // See \ref grouping_tagging for details. // // The blaze::UniformVector is the best choice for uniform vectors of any size. Its size can be // modified at runtime: \code // Definition of a 3-dimensional integral column vector blaze::UniformVector<int> a( 3UL ); // Definition of a 4-dimensional single precision column vector blaze::UniformVector<float,blaze::columnVector> b( 4UL ); // Definition of a double precision row vector with size 0 blaze::UniformVector<double,blaze::rowVector> c; \endcode // \n \section vector_types_sparse_vectors Sparse Vectors // <hr> // // \subsection vector_types_compressed_vector CompressedVector // // The blaze::CompressedVector class is the representation of an arbitrarily sized sparse // vector, which stores only non-zero elements of arbitrary type. 
It can be included via the // header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/CompressedVector.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the elements, the transpose flag, and the group tag of the vector can be specified // via the three template parameters: \code namespace blaze { template< typename Type, bool TF, typename Tag > class CompressedVector; } // namespace blaze \endcode // - \c Type: specifies the type of the vector elements. CompressedVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::defaultTransposeFlag. // - \c Tag : optional type parameter to tag the vector. The default type is \c blaze::Group0. // See \ref grouping_tagging for details. // // The blaze::CompressedVector is the right choice for all kinds of sparse vectors: \code // Definition of a 3-dimensional integral column vector blaze::CompressedVector<int> a( 3UL ); // Definition of a 4-dimensional single precision column vector with capacity for 3 non-zero elements blaze::CompressedVector<float,blaze::columnVector> b( 4UL, 3UL ); // Definition of a double precision row vector with size 0 blaze::CompressedVector<double,blaze::rowVector> c; \endcode // \n \subsection vector_types_zero_vector ZeroVector // // The blaze::ZeroVector class template is the representation of an immutable, arbitrary sized // zero vector with elements of arbitrary type. 
It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/ZeroVector.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the elements, the transpose flag, and the group tag of the vector can be specified // via the three template parameters: \code namespace blaze { template< typename Type, bool TF, typename Tag > class ZeroVector; } // namespace blaze \endcode // - \c Type: specifies the type of the vector elements. ZeroVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::defaultTransposeFlag. // - \c Tag : optional type parameter to tag the vector. The default type is \c blaze::Group0. // See \ref grouping_tagging for details. // // The blaze::ZeroVector is the perfect choice to represent a zero vector: \code // Definition of a 3-dimensional integral zero column vector blaze::ZeroVector<int> a( 3UL ); // Definition of a 6-dimensional single precision zero column vector blaze::ZeroVector<float,blaze::columnVector> b( 6UL ); // Definition of a double precision row vector with size 0 blaze::ZeroVector<double,blaze::rowVector> c; \endcode // \n Previous: \ref vectors &nbsp; &nbsp; Next: \ref vector_operations */ //************************************************************************************************* //**Vector Operations****************************************************************************** /*!\page vector_operations Vector Operations // // \tableofcontents // // // \n \section vector_operations_constructors Constructors // <hr> // // Instantiating and setting up a vector is very easy and intuitive. 
However, there are a few // rules to take care of: // - In case the last template parameter (the transpose flag) is omitted, the vector is per // default a column vector. // - The elements of a \c StaticVector or \c HybridVector are default initialized (i.e. built-in // data types are initialized to 0, class types are initialized via the default constructor). // - Newly allocated elements of a \c DynamicVector or \c CompressedVector remain uninitialized // if they are of built-in type and are default constructed if they are of class type. // // \n \subsection vector_operations_default_construction Default Construction \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::CompressedVector; // All vectors can be default constructed. Whereas the size // of StaticVectors is fixed via the second template parameter, // the initial size of a default constructed DynamicVector or // CompressedVector is 0. StaticVector<int,2UL> v1; // Instantiation of a 2D integer column vector. // All elements are initialized to 0. StaticVector<long,3UL,columnVector> v2; // Instantiation of a 3D long integer column vector. // Again, all elements are initialized to 0L. DynamicVector<float> v3; // Instantiation of a dynamic single precision column // vector of size 0. DynamicVector<double,rowVector> v4; // Instantiation of a dynamic double precision row // vector of size 0. CompressedVector<int> v5; // Instantiation of a compressed integer column // vector of size 0. CompressedVector<double,rowVector> v6; // Instantiation of a compressed double precision row // vector of size 0. \endcode // \n \subsection vector_operations_size_construction Construction with Specific Size // // The \c DynamicVector, \c HybridVector and \c CompressedVector classes offer a constructor that // allows to immediately give the vector the required size. Whereas both dense vectors (i.e. 
// \c DynamicVector and \c HybridVector) use this information to allocate memory for all vector // elements, \c CompressedVector merely acquires the size but remains empty. \code DynamicVector<int,columnVector> v7( 9UL ); // Instantiation of an integer dynamic column vector // of size 9. The elements are NOT initialized! HybridVector< complex<float>, 5UL > v8( 2UL ); // Instantiation of a column vector with two single // precision complex values. The elements are // default constructed. CompressedVector<int,rowVector> v9( 10UL ); // Instantiation of a compressed row vector with // size 10. Initially, the vector provides no // capacity for non-zero elements. \endcode // \n \subsection vector_operations_initialization_constructors Initialization Constructors // // All dense vector classes offer a constructor that allows for a direct, homogeneous initialization // of all vector elements. In contrast, for sparse vectors the predicted number of non-zero elements // can be specified \code StaticVector<int,3UL,rowVector> v10( 2 ); // Instantiation of a 3D integer row vector. // All elements are initialized to 2. DynamicVector<float> v11( 3UL, 7.0F ); // Instantiation of a dynamic single precision // column vector of size 3. All elements are // set to 7.0F. CompressedVector<float,rowVector> v12( 15UL, 3UL ); // Instantiation of a single precision column // vector of size 15, which provides enough // space for at least 3 non-zero elements. \endcode // \n \subsection vector_operations_array_construction Array Construction // // Alternatively, all dense vector classes offer a constructor for an initialization with a dynamic // or static array, or with a \c std::array. If the vector is initialized from a dynamic array, the // constructor expects the actual size of the array as first argument, the array as second argument. // In case of a static array or \c std::array, the fixed size of the array is used: \code const unique_ptr<double[]> array1( new double[2] ); // ... 
Initialization of the dynamic array blaze::StaticVector<double,2UL> v13( 2UL, array1.get() ); const int array2[4] = { 4, -5, -6, 7 }; blaze::StaticVector<int,4UL> v14( array2 ); const std::array<float,3UL> array3{ 1.1F, 2.2F, 3.3F }; blaze::StaticVector<float,3UL> v15( array3 ); \endcode // \n \subsection vector_operations_initializer_list_construction Initializer List Construction // // In addition, all dense and sparse vector classes can be directly initialized by means of an // initializer list: \code blaze::DynamicVector<float> v16{ 1.0F, 2.0F, 3.0F, 4.0F }; blaze::CompressedVector<int> v17{ 0, 2, 0, 0, 5, 0, 7, 0 }; \endcode // Dynamically sized vectors (such as e.g. \ref vector_types_hybrid_vector, // \ref vector_types_dynamic_vector or \ref vector_types_compressed_vector) are sized according // to the size of the initializer list and all their elements are (copy) assigned the values of // the list. For fixed size vectors (such as e.g. \ref vector_types_static_vector) missing values // are initialized as default and in case the size of the initializer list exceeds the size // of the vector a \c std::invalid_argument exception is thrown. In case of sparse vectors, only // the non-zero elements are used to initialize the vector. // // \n \subsection vector_operations_copy_construction Copy Construction // // All dense and sparse vectors can be created as the copy of any other dense or sparse vector // with the same transpose flag (i.e. blaze::rowVector or blaze::columnVector). \code StaticVector<int,9UL,columnVector> v18( v7 ); // Instantiation of the dense column vector v18 // as copy of the dense column vector v7. DynamicVector<int,rowVector> v19( v9 ); // Instantiation of the dense row vector v19 as // copy of the sparse row vector v9. CompressedVector<int,columnVector> v20( v1 ); // Instantiation of the sparse column vector v20 // as copy of the dense column vector v1.
CompressedVector<float,rowVector> v21( v12 ); // Instantiation of the sparse row vector v21 as // copy of the row vector v12. \endcode // Note that it is not possible to create a \c StaticVector as a copy of a vector with a different // size: \code StaticVector<int,5UL,columnVector> v22( v7 ); // Runtime error: Size does not match! StaticVector<int,4UL,rowVector> v23( v10 ); // Compile time error: Size does not match! \endcode // \n \section vector_operations_assignment Assignment // <hr> // // There are several types of assignment to dense and sparse vectors: // \ref vector_operations_homogeneous_assignment, \ref vector_operations_array_assignment, // \ref vector_operations_copy_assignment, and \ref vector_operations_compound_assignment. // // \n \subsection vector_operations_homogeneous_assignment Homogeneous Assignment // // Sometimes it may be necessary to assign the same value to all elements of a dense vector. // For this purpose, the assignment operator can be used: \code blaze::StaticVector<int,3UL> v1; blaze::DynamicVector<double> v2; // Setting all integer elements of the StaticVector to 2 v1 = 2; // Setting all double precision elements of the DynamicVector to 5.0 v2 = 5.0; \endcode // \n \subsection vector_operations_array_assignment Array Assignment // // Dense vectors can also be assigned a static array or \c std::array: \code blaze::StaticVector<float,2UL> v1; blaze::DynamicVector<double,rowVector> v2; const float array1[2] = { 1.0F, 2.0F }; const std::array<double,5UL> array2{ 2.1, 4.0, -1.7, 8.6, -7.2 }; v1 = array1; v2 = array2; \endcode // \n \subsection vector_operations_initializer_list_assignment Initializer List Assignment // // Alternatively, it is possible to directly assign an initializer list to a dense or sparse // vector: \code blaze::DynamicVector<float> v1; blaze::CompressedVector<double,rowVector> v2; v1 = { 1.0F, 2.0F }; v2 = { 2.1, 0.0, -1.7, 0.0, -7.2 }; \endcode // Dynamically sized vectors (such as e.g.
\ref vector_types_hybrid_vector, // \ref vector_types_dynamic_vector or \ref vector_types_compressed_vector) are resized according // to the size of the initializer list and all their elements are (copy) assigned the values of // the list. For fixed size vectors (such as e.g. \ref vector_types_static_vector) missing values // are reset to their default value and in case the size of the initializer list exceeds the size // of the vector a \c std::invalid_argument exception is thrown. In case of sparse vectors, only // the non-zero elements are considered. // // \n \subsection vector_operations_copy_assignment Copy Assignment // // For all vector types it is generally possible to assign another vector with the same transpose // flag (i.e. blaze::columnVector or blaze::rowVector). Note that in case of \c StaticVectors, the // assigned vector is required to have the same size as the \c StaticVector since the size of a // \c StaticVector cannot be adapted! \code blaze::StaticVector<int,3UL,columnVector> v1; blaze::DynamicVector<int,columnVector> v2( 3UL ); blaze::DynamicVector<float,columnVector> v3( 5UL ); blaze::CompressedVector<int,columnVector> v4( 3UL ); blaze::CompressedVector<float,rowVector> v5( 3UL ); // ... Initialization of the vectors v1 = v2; // OK: Assignment of a 3D dense column vector to another 3D dense column vector v1 = v4; // OK: Assignment of a 3D sparse column vector to a 3D dense column vector v1 = v3; // Runtime error: Cannot assign a 5D vector to a 3D static vector v1 = v5; // Compilation error: Cannot assign a row vector to a column vector \endcode // \n \subsection vector_operations_compound_assignment Compound Assignment // // Next to plain assignment, it is also possible to use addition assignment, subtraction // assignment, and multiplication assignment. Note however, that in contrast to plain assignment // the size and the transpose flag of the vectors have to be equal in order to be able to perform a // compound assignment.
\code blaze::StaticVector<int,5UL,columnVector> v1; blaze::DynamicVector<int,columnVector> v2( 5UL ); blaze::CompressedVector<float,columnVector> v3( 7UL ); blaze::DynamicVector<float,rowVector> v4( 7UL ); blaze::CompressedVector<float,rowVector> v5( 7UL ); // ... Initialization of the vectors v1 += v2; // OK: Addition assignment between two column vectors of the same size v1 += v3; // Runtime error: No compound assignment between vectors of different size v1 -= v4; // Compilation error: No compound assignment between vectors of different transpose flag v4 *= v5; // OK: Multiplication assignment between two row vectors of the same size \endcode // \n \section vector_operations_element_access Element Access // <hr> // // \subsection vector_operations_subscript_operator_1 Subscript Operator // // The easiest and most intuitive way to access a dense or sparse vector is via the subscript // operator. The indices to access a vector are zero-based: \code blaze::DynamicVector<int> v1( 5UL ); v1[0] = 1; v1[1] = 3; // ... blaze::CompressedVector<float> v2( 5UL ); v2[2] = 7.3F; v2[4] = -1.4F; \endcode // Whereas using the subscript operator on a dense vector only accesses the already existing // element, accessing an element of a sparse vector via the subscript operator potentially // inserts the element into the vector and may therefore be more expensive. Consider the // following example: \code blaze::CompressedVector<int> v1( 10UL ); for( size_t i=0UL; i<v1.size(); ++i ) { ... = v1[i]; } \endcode // Although the compressed vector is only used for read access within the for loop, using the // subscript operator temporarily inserts 10 non-zero elements into the vector. Therefore the // preferred way to traverse the non-zero elements of a sparse vector is to use iterators. // // \n \subsection vector_operations_iterators Iterators // // An alternate way to traverse the elements contained in a dense or sparse vector is by means // of iterators. 
For that purpose, all vectors provide the \c begin(), \c cbegin(), \c end(), // and \c cend() members functions. In case of non-const vectors, \c begin() and \c end() return // an \c Iterator, which allows a manipulation of the (non-zero) value. In case of a constant // vector or in case \c cbegin() or \c cend() are used a \c ConstIterator is returned. Iterators // on dense vectors traverse all elements of the vector, including the zero elements. Iterators // on sparse vectors only traverse the non-zero elements. // // The following two examples demonstrate how to traverse the elements of a dense and sparse // vector, respectively: \code using blaze::DynamicVector; DynamicVector<int> v1( 10UL ); // Traversing all elements contained in the vector by Iterator for( DynamicVector<int>::Iterator it=v1.begin(); it!=v1.end(); ++it ) { *it = ...; // OK: Write access to the value of the element. ... = *it; // OK: Read access to the value of the element. } // Traversing all elements contained in the vector by ConstIterator for( DynamicVector<int>::ConstIterator it=v1.cbegin(); it!=v1.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the value of the element. } // Traversing the vector elements by means of a range-based for loop for( int& i : v1 ) { i = ...; // OK: Write access to the value of the element. ... = i; // OK: Read access to the value of the element. } \endcode \code using blaze::CompressedVector; CompressedVector<int> v2( 10UL ); // ... Initialization of the vector // Traversing the non-zero elements contained in the vector by Iterator for( CompressedVector<int>::Iterator it=v2.begin(); it!=v2.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... 
= it->index(); // OK: Read access to the index of the non-zero element. } // Traversing the non-zero elements contained in the vector by ConstIterator for( CompressedVector<int>::ConstIterator it=v2.cbegin(); it!=v2.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } \endcode // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions: \code for( CompressedVector<int>::Iterator it=begin( v2 ); it!=end( v2 ); ++it ) { // ... } for( CompressedVector<int>::ConstIterator it=cbegin( v2 ); it!=cend( v2 ); ++it ) { // ... } \endcode // \n \subsection vector_operations_data .data() / data() // // Sometimes it is necessary to acquire a pointer to the first element of the underlying array // of a dense vector. For that purpose the \c data() member function or the free \c data() function // can be used: \code // Instantiating a dynamic vector with 10 elements blaze::DynamicVector<int> v( 10UL ); v.data(); // Returns a pointer to the first element of the dynamic vector data( v ); // Same effect as the member function \endcode // \n \section vector_operations_element_insertion Element Insertion // <hr> // // In contrast to dense vectors, that store all elements independent of their value and that // offer direct access to all elements, sparse vectors only store the non-zero elements contained // in the vector. Therefore it is necessary to explicitly add elements to the vector. 
// // \n \subsection vector_operations_subscript_operator_2 Subscript Operator // // The first option to add elements to a sparse vector is the subscript operator: \code using blaze::CompressedVector; CompressedVector<int> v1( 3UL ); v1[1] = 2; \endcode // In case the element at the given index is not yet contained in the vector, it is automatically // inserted. Otherwise the old value is replaced by the new value 2. The operator returns a // reference to the sparse vector element. // // \n \subsection vector_operations_set .set() // // An alternative to the subscript operator is the \c set() function: In case the element is not // yet contained in the vector the element is inserted, else the element's value is modified: \code // Insert or modify the value at index 3 v1.set( 3, 1 ); \endcode // \n \subsection vector_operations_insert .insert() // // The insertion of elements can be better controlled via the \c insert() function. In contrast to // the subscript operator and the \c set() function it emits an exception in case the element is // already contained in the vector. In order to check for this case, the \c find() function can be // used: \code // In case the element at index 4 is not yet contained in the vector it is inserted // with a value of 6. if( v1.find( 4 ) == v1.end() ) v1.insert( 4, 6 ); \endcode // \n \subsection vector_operations_append .append() // // Although the \c insert() function is very flexible, due to performance reasons it is not suited // for the setup of large sparse vectors. A very efficient, yet also very low-level way to fill // a sparse vector is the \c append() function. It requires the sparse vector to provide enough // capacity to insert a new element. Additionally, the index of the new element must be larger // than the index of the previous element. Violating these conditions results in undefined // behavior!
\code v1.reserve( 10 ); // Reserving space for 10 non-zero elements v1.append( 5, -2 ); // Appending the element -2 at index 5 v1.append( 6, 4 ); // Appending the element 4 at index 6 // ... \endcode // \n \section vector_operations_element_removal Element Removal // <hr> // // \subsection vector_operations_erase .erase() // // The \c erase() member functions can be used to remove elements from a sparse vector. The // following example gives an impression of the five different flavors of \c erase(): \code using blaze::CompressedVector; CompressedVector<int> v( 42 ); // ... Initialization of the vector // Erasing the element at index 21 v.erase( 21 ); // Erasing a single element via iterator v.erase( v.find( 4 ) ); // Erasing all non-zero elements in the range [7..24] v.erase( v.lowerBound( 7 ), v.upperBound( 24 ) ); // Erasing all non-zero elements with a value larger than 9 by passing a unary predicate v.erase( []( int i ){ return i > 9; } ); // Erasing all non-zero elements in the range [30..40] with a value larger than 5 v.erase( v.lowerBound( 30 ), v.upperBound( 40 ), []( int i ){ return i > 5; } ); \endcode // \n \section vector_operations_element_lookup Element Lookup // <hr> // // A sparse vector only stores the non-zero elements contained in the vector. Therefore, whenever // accessing a vector element at a specific index a lookup operation is required. Whereas the // subscript operator is performing this lookup automatically, it is also possible to use the // \c find(), \c lowerBound(), and \c upperBound() member functions for a manual lookup. // // \n \subsection vector_operations_find .find() / find() // // The \c find() function can be used to check whether a specific element is contained in a sparse // vector. It specifically searches for the element at the given index. In case the element is // found, the function returns an iterator to the element. 
Otherwise an iterator just past the // last non-zero element of the compressed vector (the \c end() iterator) is returned. Note that // the returned iterator is subject to invalidation due to inserting operations via the subscript // operator, the \c set() function or the \c insert() function! \code using blaze::CompressedVector; CompressedVector<int> a( 42 ); // ... Initialization of the vector // Searching the element at index 7. In case the element is not // contained in the vector, the end() iterator is returned. CompressedVector<int>::Iterator pos( a.find( 7 ) ); if( pos != a.end() ) { // ... } \endcode // Alternatively, the free function \c find() can be used to find a specific element in a sparse // vector: \code find( a, 7 ); // Searching the element at index 7; same effect as the member function \endcode // \n \subsection vector_operations_lowerbound .lowerBound() / lowerBound() // // The \c lowerBound() function returns an iterator to the first element with an index not less // than the given index. In combination with the \c upperBound() function this function can be // used to create a pair of iterators specifying a range of indices. Note that the returned // iterator is subject to invalidation due to inserting operations via the subscript operator, // the \c set() function or the \c insert() function! \code using blaze::CompressedVector; CompressedVector<int> a( 42 ); // ... Initialization of the vector // Searching the lower bound of index 17.
CompressedVector<int>::Iterator pos1( a.lowerBound( 17 ) ); // Searching the upper bound of index 28 CompressedVector<int>::Iterator pos2( a.upperBound( 28 ) ); // Erasing all elements in the specified range a.erase( pos1, pos2 ); \endcode // Alternatively, the free function \c lowerBound() can be used to: \code lowerBound( a, 17 ); // Searching the lower bound of index 17; same effect as the member function \endcode // \n \subsection vector_operations_upperbound .upperBound() / upperBound() // // The \c upperBound() function returns an iterator to the first element with an index greater than // the given index. In combination with the \c lowerBound() function this function can be used to // create a pair of iterators specifying a range of indices. Note that the returned iterator is // subject to invalidation due to inserting operations via the subscript operator, the \c set() // function or the \c insert() function! \code using blaze::CompressedVector; CompressedVector<int> a( 42 ); // ... Initialization of the vector // Searching the lower bound of index 17.
CompressedVector<int>::Iterator pos1( a.lowerBound( 17 ) ); // Searching the upper bound of index 28 CompressedVector<int>::Iterator pos2( a.upperBound( 28 ) ); // Erasing all elements in the specified range a.erase( pos1, pos2 ); \endcode // Alternatively, the free function \c upperBound() can be used to: \code upperBound( a, 28 ); // Searching the upper bound of index 28; same effect as the member function \endcode // \n \section vector_operations_non_modifying_operations Non-Modifying Operations // <hr> // // \subsection vector_operations_size .size() / size() // // Via the \c size() member function, the current size of a dense or sparse vector can be queried: \code // Instantiating a dynamic vector with size 10 blaze::DynamicVector<int> v1( 10UL ); v1.size(); // Returns 10 // Instantiating a compressed vector with size 12 and capacity for 3 non-zero elements blaze::CompressedVector<double> v2( 12UL, 3UL ); v2.size(); // Returns 12 \endcode // Alternatively, the free function \c size() can be used to query to current size of a vector. // In contrast to the member function, the free function can also be used to query the size of // vector expressions: \code size( v1 ); // Returns 10, i.e. has the same effect as the member function size( v2 ); // Returns 12, i.e. has the same effect as the member function blaze::DynamicMatrix<int> A( 15UL, 12UL ); size( A * v2 ); // Returns 15, i.e. the size of the resulting vector \endcode // \n \subsection vector_operations_capacity .capacity() / capacity() // // Via the \c capacity() (member) function the internal capacity of a dense or sparse vector // can be queried. Note that the capacity of a vector doesn't have to be equal to the size // of a vector. In case of a dense vector the capacity will always be greater or equal than // the size of the vector, in case of a sparse vector the capacity may even be less than // the size. 
\code v1.capacity(); // Returns at least 10 \endcode // For symmetry reasons, there is also a free function \c capacity() available that can be used // to query the capacity: \code capacity( v1 ); // Returns at least 10, i.e. has the same effect as the member function \endcode // Note, however, that it is not possible to query the capacity of a vector expression: \code capacity( A * v1 ); // Compilation error! \endcode // \n \subsection vector_operations_nonzeros .nonZeros() / nonZeros() // // For both dense and sparse vectors the number of non-zero elements can be determined via the // \c nonZeros() member function. Sparse vectors directly return their number of non-zero // elements, dense vectors traverse their elements and count the number of non-zero elements. \code v1.nonZeros(); // Returns the number of non-zero elements in the dense vector v2.nonZeros(); // Returns the number of non-zero elements in the sparse vector \endcode // There is also a free function \c nonZeros() available to query the current number of non-zero // elements: \code nonZeros( v1 ); // Returns the number of non-zero elements in the dense vector nonZeros( v2 ); // Returns the number of non-zero elements in the sparse vector \endcode // The free \c nonZeros() function can also be used to query the number of non-zero elements in // a vector expression.
However, the result is not the exact number of non-zero elements, but // may be a rough estimation: \code nonZeros( A * v1 ); // Estimates the number of non-zero elements in the vector expression \endcode // \n \subsection vector_operations_isempty isEmpty() // // The \c isEmpty() function returns whether the total number of elements of the vector is zero: \code blaze::DynamicVector<int> a; // Create an empty vector isEmpty( a ); // Returns true a.resize( 10 ); // Resize to 10 elements isEmpty( a ); // Returns false \endcode // \n \subsection vector_operations_isnan isnan() // // The \c isnan() function provides the means to check a dense or sparse vector for not-a-number // elements: \code blaze::DynamicVector<double> a; // ... Resizing and initialization if( isnan( a ) ) { ... } \endcode \code blaze::CompressedVector<double> a; // ... Resizing and initialization if( isnan( a ) ) { ... } \endcode // If at least one element of the vector is not-a-number, the function returns \c true, otherwise // it returns \c false. // // // \n \subsection vector_operations_isinf isinf() // // The \c isinf() function checks the given dense or sparse vector for infinite (\c inf) elements: \code blaze::DynamicVector<double> a; // ... Resizing and initialization if( isinf( a ) ) { ... } \endcode \code blaze::CompressedVector<double> a; // ... Resizing and initialization if( isinf( a ) ) { ... } \endcode // If at least one element of the vector is infinite, the function returns \c true, otherwise it // returns \c false. // // // \n \subsection vector_operations_isfinite isfinite() // // The \c isfinite() function checks if all elements of the given dense or sparse vector are // finite elements (i.e. normal, subnormal or zero elements, but not infinite or NaN): \code blaze::DynamicVector<double> a; // ... Resizing and initialization if( isfinite( a ) ) { ... } \endcode \code blaze::CompressedVector<double> a; // ... Resizing and initialization if( isfinite( a ) ) { ...
} \endcode // If all elements of the vector are finite, the function returns \c true, otherwise it returns // \c false. // // // \n \subsection vector_operations_isdefault isDefault() // // The \c isDefault() function returns whether the given dense or sparse vector is in default state: \code blaze::HybridVector<int,20UL> a; // ... Resizing and initialization if( isDefault( a ) ) { ... } \endcode // A vector is in default state if it appears to just have been default constructed. All resizable // vectors (\c HybridVector, \c DynamicVector, or \c CompressedVector) and \c CustomVector are // in default state if its size is equal to zero. A non-resizable vector (\c StaticVector, all // subvectors, element selections, rows, and columns) is in default state if all its elements are // in default state. For instance, in case the vector is instantiated for a built-in integral or // floating point data type, the function returns \c true in case all vector elements are 0 and // \c false in case any vector element is not 0. // // // \n \subsection vector_operations_isUniform isUniform() // // In order to check if all vector elements are identical, the \c isUniform() function can be used: \code blaze::DynamicVector<int> a; // ... Resizing and initialization if( isUniform( a ) ) { ... } \endcode // Note that in case of sparse vectors the zero elements are also taken into account! // // // \n \subsection vector_operations_isZero isZero() // // In order to check if all vector elements are zero, the \c isZero() function can be used: \code blaze::DynamicVector<int> a; // ... Resizing and initialization if( isZero( a ) ) { ... 
} \endcode // \n \subsection vector_operations_length length() / sqrLength() // // In order to calculate the length (magnitude) of a dense or sparse vector, both the \c length() // and \c sqrLength() function can be used: \code blaze::StaticVector<float,3UL,rowVector> v{ -1.2F, 2.7F, -2.3F }; const float len = length ( v ); // Computes the current length of the vector const float sqrlen = sqrLength( v ); // Computes the square length of the vector \endcode // Note that both functions can only be used for vectors with built-in or complex element type! // // // \n \subsection vector_operations_vector_trans trans() // // As already mentioned, vectors can either be column vectors (blaze::columnVector) or row vectors // (blaze::rowVector). A column vector cannot be assigned to a row vector and vice versa. However, // vectors can be transposed via the \c trans() function: \code blaze::DynamicVector<int,columnVector> v1( 4UL ); blaze::CompressedVector<int,rowVector> v2( 4UL ); v1 = v2; // Compilation error: Cannot assign a row vector to a column vector v1 = trans( v2 ); // OK: Transposing the row vector to a column vector and assigning it // to the column vector v1 v2 = trans( v1 ); // OK: Transposing the column vector v1 and assigning it to the row vector v2 v1 += trans( v2 ); // OK: Addition assignment of two column vectors \endcode // \n \subsection vector_operations_ctrans ctrans() // // It is also possible to compute the conjugate transpose of a vector. 
This operation is available // via the \c ctrans() function: \code blaze::CompressedVector< complex<float>, rowVector > v1( 4UL ); blaze::DynamicVector< complex<float>, columnVector > v2( 4UL ); v1 = ctrans( v2 ); // Compute the conjugate transpose vector \endcode // Note that the \c ctrans() function has the same effect as manually applying the \c conj() and // \c trans() function in any order: \code v1 = trans( conj( v2 ) ); // Computing the conjugate transpose vector v1 = conj( trans( v2 ) ); // Computing the conjugate transpose vector \endcode // \n \subsection vector_operations_reverse reverse() // // Via the \c reverse() function it is possible to reverse the elements of a dense or sparse // vector. The following example demonstrates this by means of a dense vector: \code blaze::DynamicVector<int> a{ 1, 2, 3, 4, 5 }; blaze::DynamicVector<int> b; b = reverse( a ); // Results in ( 5 4 3 2 1 ) \endcode // \n \subsection vector_operations_evaluate eval() / evaluate() // // The \c evaluate() function forces an evaluation of the given vector expression and enables // an automatic deduction of the correct result type of an operation. The following code example // demonstrates its intended use for the multiplication of a dense and a sparse vector: \code using blaze::DynamicVector; using blaze::CompressedVector; blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... Resizing and initialization auto c = evaluate( a * b ); \endcode // In this scenario, the \c evaluate() function assists in deducing the exact result type of // the operation via the \c auto keyword. Please note that if \c evaluate() is used in this // way, no temporary vector is created and no copy operation is performed. Instead, the result // is directly written to the target vector due to the return value optimization (RVO).
However, // if \c evaluate() is used in combination with an explicit target type, a temporary will be // created and a copy operation will be performed if the used type differs from the type // returned from the function: \code CompressedVector<double> d( a * b ); // No temporary & no copy operation DynamicVector<double> e( a * b ); // Temporary & copy operation d = evaluate( a * b ); // Temporary & copy operation \endcode // Sometimes it might be desirable to explicitly evaluate a sub-expression within a larger // expression. However, please note that \c evaluate() is not intended to be used for this // purpose. This task is more elegantly and efficiently handled by the \c eval() function: \code blaze::DynamicVector<double> a, b, c, d; d = a + evaluate( b * c ); // Unnecessary creation of a temporary vector d = a + eval( b * c ); // No creation of a temporary vector \endcode // In contrast to the \c evaluate() function, \c eval() can take the complete expression // into account and therefore can guarantee the most efficient way to evaluate it (see also // \ref intra_statement_optimization). // // \n \subsection vector_operations_noalias noalias() // // The \b Blaze library is able to reliably detect aliasing during the assignment of vectors. // In case the aliasing would lead to an incorrect result, \b Blaze introduces an intermediate // temporary of the appropriate type to break the aliasing. For instance, in the following // example \b Blaze performs an alias detection in both assignments, but only, in the second // assignment it detects a problematic aliasing and uses an intermediate temporary in order // to be able to compute the correct result: \code blaze::DynamicVector<double> x, y; blaze::DynamicMatrix<double> A; x = x + y; // No problematic aliasing of x, no intermediate temporary is required. x = A * x; // Problematic aliasing of x; intermediate temporary required! \endcode // The detection of aliasing effects, however, takes a small runtime effort. 
In order to disable // the aliasing detection, the \c noalias() function can be used: \code blaze::DynamicVector<double> x, y; blaze::DynamicMatrix<double> A; x = noalias( x + y ); // No alias detection performed, no intermediate temporary. x = noalias( A * x ); // No alias detection performed, no intermediate temporary. // Note that the final result will be incorrect! \endcode // \warning The \c noalias() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Using \c noalias() in a situation // where an aliasing effect occurs leads to undefined behavior (which can manifest as violated invariants // or wrong computation results)! // // \n \subsection vector_operations_nosimd nosimd() // // By default, \b Blaze attempts to vectorize all operations by means of SSE, AVX, etc. in order // to achieve maximum performance. However, via the \c nosimd() operation it is possible to disable // the SIMD evaluation of any operation: \code blaze::DynamicVector<double> x, y; blaze::DynamicMatrix<double> A; x = nosimd( x + y ); // Disables SIMD for the vector/vector addition x = nosimd( A * x ); // Disables SIMD for the matrix/vector multiplication \endcode // Please note that the main purpose of the \c nosimd() operation is to enable an easy performance // comparison between the vectorized and non-vectorized evaluation. Using the \c nosimd() operation // will likely result in significantly reduced performance! // // // \n \section vector_operations_modifying_operations Modifying Operations // <hr> // // \subsection vector_operations_resize_reserve .resize() / .reserve() // // The size of a \c StaticVector is fixed by the second template parameter and a \c CustomVector // cannot be resized.
In contrast, the size of \c DynamicVectors, \c HybridVectors as well as // \c CompressedVectors can be changed via the \c resize() function: \code using blaze::DynamicVector; using blaze::CompressedVector; DynamicVector<int,columnVector> v1; CompressedVector<int,rowVector> v2( 4 ); v2[1] = -2; v2[3] = 11; // Adapting the size of the dynamic and compressed vectors. The (optional) second parameter // specifies whether the existing elements should be preserved. Per default, the existing // elements are preserved. v1.resize( 5UL ); // Resizing vector v1 to 5 elements. Elements of built-in type remain // uninitialized, elements of class type are default constructed. v1.resize( 3UL, false ); // Resizing vector v1 to 3 elements. The old elements are lost, the // new elements are NOT initialized! v2.resize( 8UL, true ); // Resizing vector v2 to 8 elements. The old elements are preserved. v2.resize( 5UL, false ); // Resizing vector v2 to 5 elements. The old elements are lost. \endcode // Note that resizing a vector invalidates all existing views (see e.g. \ref views_subvectors) // on the vector: \code blaze::DynamicVector<int,rowVector> v1( 10UL ); // Creating a dynamic vector of size 10 auto sv = subvector( v1, 2UL, 5UL ); // Creating a view on the range [2..6] v1.resize( 6UL ); // Resizing the vector invalidates the view \endcode // When the internal capacity of a vector is no longer sufficient, the allocation of a larger // chunk of memory is triggered. In order to avoid frequent reallocations, the \c reserve() // function can be used up front to set the internal capacity: \code blaze::DynamicVector<int> v1; v1.reserve( 100 ); v1.size(); // Returns 0 v1.capacity(); // Returns at least 100 \endcode // Note that the size of the vector remains unchanged, but only the internal capacity is set // according to the specified value!
// // \n \subsection vector_operations_shrinkToFit .shrinkToFit() // // The internal capacity of vectors with dynamic memory is preserved in order to minimize the // number of reallocations. For that reason, the \c resize() and \c reserve() functions can lead // to memory overhead. The \c shrinkToFit() member function can be used to minimize the internal // capacity: \code blaze::DynamicVector<int> v1( 1000UL ); // Create a vector of 1000 integers v1.resize( 10UL ); // Resize to 10, but the capacity is preserved v1.shrinkToFit(); // Remove the unused capacity \endcode // Please note that due to padding the capacity might not be reduced exactly to \c size(). Please // also note that in case a reallocation occurs, all iterators (including \c end() iterators), all // pointers and references to elements of the vector are invalidated. // // \subsection vector_operations_reset_clear reset() / clear() // // In order to reset all elements of a vector, the \c reset() function can be used: \code // Setup of a single precision column vector, whose elements are initialized with 2.0F. blaze::DynamicVector<float> v1( 3UL, 2.0F ); // Resetting all elements to 0.0F. Only the elements are reset, the size of the vector is unchanged. reset( v1 ); // Resetting all elements v1.size(); // Returns 3: size and capacity remain unchanged \endcode // In order to return a vector to its default state (i.e. the state of a default constructed // vector), the \c clear() function can be used: \code // Setup of a single precision column vector, whose elements are initialized with -1.0F. blaze::DynamicVector<float> v1( 5, -1.0F ); // Resetting the entire vector. clear( v1 ); // Resetting the entire vector v1.size(); // Returns 0: size is reset, but capacity remains unchanged \endcode // Note that resetting or clearing both dense and sparse vectors does not change the capacity // of the vectors. 
// // // \n \subsection vector_operations_swap swap() // // Via the \c swap() function it is possible to completely swap the contents of two vectors of // the same type: \code blaze::DynamicVector<int,columnVector> v1( 10UL ); blaze::DynamicVector<int,columnVector> v2( 20UL ); swap( v1, v2 ); // Swapping the contents of v1 and v2 \endcode // \n \section vector_operations_arithmetic_operations Arithmetic Operations // <hr> // // \subsection vector_operations_normalize normalize() // // The \c normalize() function can be used to scale any non-zero vector to a length of 1. In // case the vector does not contain a single non-zero element (i.e. is a zero vector), the // \c normalize() function returns a zero vector. \code blaze::DynamicVector<float,columnVector> v1( 10UL ); blaze::CompressedVector<double,columnVector> v2( 12UL ); v1 = normalize( v1 ); // Normalizing the dense vector v1 length( v1 ); // Returns 1 (or 0 in case of a zero vector) v1 = normalize( v2 ); // Assigning v1 the normalized vector v2 length( v1 ); // Returns 1 (or 0 in case of a zero vector) \endcode // Note that the \c normalize() function only works for floating point vectors. The attempt to // use it for an integral vector results in a compile time error. // // // \n \subsection vector_operations_min_max min() / max() // // The \c min() and \c max() functions can be used for a single vector, multiple vectors, and // a vector and a scalar. 
// // <b>Single Vector</b> // // If passed a single vector, the functions return the smallest and largest element of the given // dense vector or the smallest and largest non-zero element of the given sparse vector, // respectively: \code blaze::StaticVector<int,4UL,rowVector> a{ -5, 2, 7, -4 }; min( a ); // Returns -5 max( a ); // Returns 7 \endcode \code blaze::CompressedVector<int> b{ 1, 0, 3, 0 }; min( b ); // Returns 1 max( b ); // Returns 3 \endcode // For more information on the unary \c min() and \c max() reduction operations see the // \ref vector_operations_reduction_operations section. // // <b>Multiple Vectors</b> // // If passed two or more dense vectors, the \c min() and \c max() functions compute the // componentwise minimum or maximum of the given vectors, respectively: \code blaze::StaticVector<int,4UL,rowVector> c{ -5, 1, -7, 4 }; blaze::StaticVector<int,4UL,rowVector> d{ -5, 3, 0, 2 }; min( a, c ); // Results in the vector ( -5, 1, -7, -4 ) max( a, c, d ); // Results in the vector ( -5, 3, 7, 4 ) \endcode // Please note that sparse vectors can only be used in the unary \c min() and \c max() functions. // Also note that all forms of the \c min() and \c max() functions can be used to compute the // smallest and largest element of a vector expression: \code min( a + b + c ); // Returns -9, i.e. the smallest value of the resulting vector max( a - b - c ); // Returns 11, i.e. 
the largest value of the resulting vector min( a + c, c - d ); // Results in ( -10 -2 -7 0 ) max( a - c, c + d ); // Results in ( 0 4 14 6 ) \endcode // <b>Vector and Scalar</b> // // If passed a dense vector and a scalar, the \c min() and \c max() functions compute the // componentwise minimum or maximum between the given vector and a uniform vector represented by // the scalar value: \code min( a, 0 ); // Results in ( -5, 0, 0, -4 ) min( 0, a ); // Results in ( -5, 0, 0, -4 ) max( a, 0 ); // Results in ( 0, 2, 7, 0 ) max( 0, a ); // Results in ( 0, 2, 7, 0 ) \endcode // \n \subsection vector_operators_softmax softmax() // // The <a href="https://en.wikipedia.org/wiki/Softmax_function">softmax function</a>, also called // the normalized exponential function, of a given dense vector can be computed via \c softmax(). // The resulting dense vector consists of real values in the range (0..1], which add up to 1. \code blaze::StaticVector<double,7UL,rowVector> x{ 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0 }; blaze::StaticVector<double,7UL,rowVector> y; // Evaluating the softmax function y = softmax( x ); // Results in ( 0.024 0.064 0.175 0.475 0.024 0.064 0.175 ) double s = sum( y ); // Results in 1 \endcode // \n \subsection vector_operators_abs abs() // // The \c abs() function can be used to compute the absolute values of each element of a vector. // For instance, the following computation \code blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, -3 }; blaze::StaticVector<int,3UL,rowVector> b( abs( a ) ); \endcode // results in the vector \f$ b = \left(\begin{array}{*{1}{c}} 1 \\ 2 \\ 3 \\ \end{array}\right)\f$ // \n \subsection vector_operators_sign sign() // // The \c sign() function can be used to evaluate the sign of each element of a vector \a a. For // each element \c i the corresponding result is 1 if \a a[i] is greater than zero, 0 if \a a[i] // is zero, and -1 if \a a[i] is less than zero. 
For instance, the following use of the \c sign() // function \code blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, 0 }; blaze::StaticVector<int,3UL,rowVector> b( sign( a ) ); \endcode // results in the vector \f$ b = \left(\begin{array}{*{1}{c}} -1 \\ 1 \\ 0 \\ \end{array}\right)\f$ // \n \subsection vector_operations_rounding_functions floor() / ceil() / trunc() / round() // // The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down/up // each element of a vector, respectively: \code blaze::StaticVector<double,3UL,rowVector> a, b; b = floor( a ); // Rounding down each element of the vector b = ceil ( a ); // Rounding up each element of the vector b = trunc( a ); // Truncating each element of the vector b = round( a ); // Rounding each element of the vector \endcode // \n \subsection vector_operators_conj conj() // // The \c conj() function can be applied on a dense or sparse vector to compute the complex // conjugate of each element of the vector: \code using blaze::StaticVector; using cplx = std::complex<double>; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Computing the vector of complex conjugates // ( (-2, 1) ) // ( ( 1,-1) ) StaticVector<cplx,2UL> b; b = conj( a ); \endcode // Additionally, vectors can be conjugated in-place via the \c conjugate() function: \code blaze::DynamicVector<cplx> c( 5UL ); conjugate( c ); // In-place conjugate operation. 
c = conj( c ); // Same as above \endcode // \n \subsection vector_operators_real real() // // The \c real() function can be used on a dense or sparse vector to extract the real part of // each element of the vector: \code using blaze::StaticVector; using cplx = std::complex<double>; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Extracting the real part of each vector element // ( -2 ) // ( 1 ) StaticVector<double,2UL> b; b = real( a ); \endcode // \n \subsection vector_operators_imag imag() // // The \c imag() function can be used on a dense or sparse vector to extract the imaginary part // of each element of the vector: \code using blaze::StaticVector; using cplx = std::complex<double>; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Extracting the imaginary part of each vector element // ( -1 ) // ( 1 ) StaticVector<double,2UL> b; b = imag( a ); \endcode // \n \subsection vector_operators_arg arg() // // The \c arg() function can be used on a dense or sparse vector to compute the phase angle for // each element of the vector: \code using blaze::StaticVector; using cplx = std::complex<double>; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Compute the phase angle of each vector element // ( -2.67795 ) // ( 0.785398 ) StaticVector<double,2UL> b; b = arg( a ); \endcode // \n \subsection vector_operations_sqrt sqrt() / invsqrt() // // Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a // vector can be computed: \code blaze::DynamicVector<double> a, b, c; b = sqrt( a ); // Computes the square root of each element c = invsqrt( a ); // Computes the inverse square root of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! 
// // // \n \subsection vector_operations_cbrt cbrt() / invcbrt() // // The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root // of each element of a vector: \code blaze::HybridVector<double,3UL> a, b, c; b = cbrt( a ); // Computes the cubic root of each element c = invcbrt( a ); // Computes the inverse cubic root of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_hypot hypot() // // The \c hypot() function can be used to compute the componentwise hypotenuse for a pair of // dense vectors: \code blaze::StaticVector<double,3UL> a, b, c; c = hypot( a, b ); // Computes the componentwise hypotenuse \endcode // \n \subsection vector_operations_clamp clamp() // // The \c clamp() function can be used to restrict all elements of a vector to a specific range: \code blaze::DynamicVector<double> a, b; b = clamp( a, -1.0, 1.0 ); // Restrict all elements to the range [-1..1] \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_pow pow() // // The \c pow() function can be used to compute the exponential value of each element of a vector. // If passed a vector and a numeric exponent, the function computes the exponential value of each // element of the vector using the same exponent. 
If passed a second vector, the function computes // the componentwise exponential value: \code blaze::StaticVector<double,3UL> a, b, c; c = pow( a, 1.2 ); // Computes the exponential value of each element c = pow( a, b ); // Computes the componentwise exponential value \endcode // \n \subsection vector_operations_exp exp() / exp2() / exp10() // // \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a // vector, respectively: \code blaze::DynamicVector<double> a, b; b = exp( a ); // Computes the base e exponential of each element b = exp2( a ); // Computes the base 2 exponential of each element b = exp10( a ); // Computes the base 10 exponential of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_log log() / log2() / log10() / log1p() / lgamma() // // The \c log(), \c log2(), \c log10(), \c log1p() and \c lgamma() functions can be used to // compute the natural, binary and common logarithm of each element of a vector: \code blaze::StaticVector<double,3UL> a, b; b = log( a ); // Computes the natural logarithm of each element b = log2( a ); // Computes the binary logarithm of each element b = log10( a ); // Computes the common logarithm of each element b = log1p( a ); // Computes the natural logarithm of x+1 of each element b = lgamma( a ); // Computes the natural logarithm of the absolute value of the gamma function \endcode // \n \subsection vector_operations_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan() // // The following trigonometric functions are available for both dense and sparse vectors: \code blaze::DynamicVector<double> a, b; b = sin( a ); // Computes the sine of each element of the vector b = cos( a ); // Computes the cosine of each element of the vector b = tan( a ); // Computes the tangent of each element of the vector b = asin( a ); // Computes the inverse sine of each element of the 
vector b = acos( a ); // Computes the inverse cosine of each element of the vector b = atan( a ); // Computes the inverse tangent of each element of the vector \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh() // // The following hyperbolic functions are available for both dense and sparse vectors: \code blaze::DynamicVector<double> a, b; b = sinh( a ); // Computes the hyperbolic sine of each element of the vector b = cosh( a ); // Computes the hyperbolic cosine of each element of the vector b = tanh( a ); // Computes the hyperbolic tangent of each element of the vector b = asinh( a ); // Computes the inverse hyperbolic sine of each element of the vector b = acosh( a ); // Computes the inverse hyperbolic cosine of each element of the vector b = atanh( a ); // Computes the inverse hyperbolic tangent of each element of the vector \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_atan2 atan2() // // The multi-valued inverse tangent is available for a pair of dense vectors: \code blaze::DynamicVector<double> a, b, c; c = atan2( a, b ); // Computes the componentwise multi-valued inverse tangent \endcode // \n \subsection vector_operations_erf erf() / erfc() // // The \c erf() and \c erfc() functions compute the (complementary) error function of each // element of a vector: \code blaze::StaticVector<double,3UL,rowVector> a, b; b = erf( a ); // Computes the error function of each element b = erfc( a ); // Computes the complementary error function of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! 
// // // \n \subsection vector_operations_map map() / forEach() // // Via the \c map() functions it is possible to execute componentwise custom operations on vectors. // The unary \c map() function can be used to apply a custom operation on each element of a dense // or sparse vector. For instance, the following example demonstrates a custom square root // computation via a lambda: \code blaze::DynamicVector<double> a, b; b = map( a, []( double d ) { return std::sqrt( d ); } ); \endcode // The N-ary \c map() functions can be used to apply an operation componentwise to the elements // of N dense vectors (where \f$ N <= 6 \f$). The following example demonstrates the merging of // two column vectors of double precision values into a vector of double precision complex numbers: \code blaze::DynamicVector<double> real{ 2.1, -4.2, 1.0, 0.6 }; blaze::DynamicVector<double> imag{ 0.3, 1.4, 2.9, -3.4 }; blaze::DynamicVector< complex<double> > cplx; // Creating the vector // ( ( 2.1, 0.3) ) // ( (-4.2, 1.4) ) // ( ( 1.0, 2.9) ) // ( ( 0.6, -3.4) ) cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } ); \endcode // Applying the map() function to a column vector and a row vector results in the outer map of // the two vectors. The following example demonstrates the outer sum of a column vector and a // row vector: \code blaze::DynamicVector<int,columnVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 }; // Results in the matrix // // ( 1 5 0 6 ) // A = ( 4 8 3 9 ) // ( -2 2 -3 3 ) // blaze::StaticMatrix<int,3UL,4UL> M1 = map( v1, v2, []( int a, int b ){ return a + b; } ); \endcode // Although the computation in the two previous examples can be parallelized it is not vectorized // and thus cannot perform at peak performance. However, it is also possible to create vectorized // custom operations. See \ref custom_operations for a detailed overview of the possibilities of // custom operations. 
// // Please note that unary custom operations on vectors have been introduced in \b Blaze 3.0 in // form of the \c forEach() function. With the introduction of binary custom functions, the // \c forEach() function has been renamed to \c map(). The \c forEach() function can still be // used, but the function might be deprecated in future releases of \b Blaze. // // // \n \subsection vector_operations_select select() // // The \c select() function performs a componentwise, conditional selection of elements. Given // the three dense vectors \c cond, \c a, and \c b, in case an element in the \c cond vector // evaluates to \c true, the corresponding element of \a a is selected, in case the \a cond element // evaluates to \c false, the corresponding element of \a b is selected. The following example // demonstrates the use of the \a select() function: \code blaze::DynamicVector<bool> cond{ true, false, true, false }; blaze::DynamicVector<int> a{ 1, -1, 1, -1 }; blaze::DynamicVector<int> b{ -2, 2, -2, 2 }; blaze::DynamicVector<int> c; // ... Resizing and initialization c = select( cond, a, b ); // Results in ( 1, 2, 1, 2 ) \endcode // \n \section vector_operations_reduction_operations Reduction Operations // <hr> // // \subsection vector_operations_reduction_operations_reduce reduce() // // The \c reduce() function performs a total reduction of the elements of the given dense vector // or the non-zero elements of the given sparse vector. The following examples demonstrate the // total reduction of a dense and sparse vector: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double totalsum1 = reduce( a, blaze::Add() ); const double totalsum2 = reduce( a, []( double a, double b ){ return a + b; } ); \endcode \code blaze::CompressedVector<double> a; // ... 
Resizing and initialization const double totalmin1 = reduce( a, blaze::Min() ); const double totalmin2 = reduce( a, []( double a, double b ){ return blaze::min( a, b ); } ); \endcode // As demonstrated in the examples it is possible to pass any binary callable as custom reduction // operation. However, for instance in the case of lambdas the vectorization of the reduction // operation is compiler dependent and might not perform at peak performance. However, it is also // possible to create vectorized custom operations. See \ref custom_operations for a detailed // overview of the possibilities of custom operations. // // Please note that the evaluation order of the \c reduce() function is unspecified. Thus the // behavior is non-deterministic if the given reduction operation is not associative or not // commutative. Also, the operation is undefined if the given reduction operation modifies the // values. // // \n \subsection vector_operations_reduction_operations_sum sum() // // The \c sum() function reduces the elements of the given dense vector or the non-zero elements // of the given sparse vector by means of addition: \code blaze::DynamicVector<int> a{ 1, 2, 3, 4 }; const int totalsum = sum( a ); // Results in 10 \endcode \code blaze::CompressedVector<int> a{ 1, 2, 3, 4 }; const int totalsum = sum( a ); // Results in 10 \endcode // Please note that the evaluation order of the \c sum() function is unspecified. 
// // \n \subsection vector_operations_reduction_operations_prod prod() // // The \c prod() function reduces the elements of the given dense vector or the non-zero elements // of the given sparse vector by means of multiplication: \code blaze::DynamicVector<int> a{ 1, 2, 3, 4 }; const int totalprod = prod( a ); // Results in 24 \endcode \code blaze::CompressedVector<int> a{ 1, 2, 3, 4 }; const int totalprod = prod( a ); // Results in 24 \endcode // \n \subsection vector_operations_reduction_operations_min min() // // The unary \c min() function returns the smallest element of the given dense vector or the // smallest non-zero element of the given sparse vector. It can only be used for element types // that support the smaller-than relationship. In case the given vector currently has a size // of 0, the returned value is the default value (e.g. 0 in case of fundamental data types). \code blaze::DynamicVector<int> a{ 1, -2, 3, 0 }; const int totalmin = min( a ); // Results in -2 \endcode \code blaze::CompressedVector<int> a{ 1, 0, 3, 0 }; const int totalmin = min( a ); // Results in 1 \endcode // \note In case the sparse vector is not completely filled, the implicit zero elements are NOT // taken into account. In the previous example the compressed vector has only 2 non-zero elements. // However, the minimum of the vector is 1. // // \n \subsection vector_operations_reduction_operations_max max() // // The unary \c max() function returns the largest element of the given dense vector or the // largest non-zero element of the given sparse vector. It can only be used for element types // that support the smaller-than relationship. In case the given vector currently has a size // of 0, the returned value is the default value (e.g. 0 in case of fundamental data types). 
\code blaze::DynamicVector<int> a{ 1, -2, 3, 0 }; const int totalmax = max( a ); // Results in 3 \endcode \code blaze::CompressedVector<int> a{ -1, 0, -3, 0 }; const int totalmax = max( a ); // Results in -1 \endcode // \note In case the sparse vector is not completely filled, the implicit zero elements are NOT // taken into account. In the previous example the compressed vector has only 2 non-zero elements. // However, the maximum of the vector is -1. // // \n \subsection vector_operations_reduction_operations_argmin argmin() // // The \c argmin() function returns the index of the first smallest element of the given dense // vector. This function can only be used for element types that support the smaller-than // relationship. In case the given vector currently has a size of 0, the returned index is 0. \code blaze::DynamicVector<int> a{ 1, -2, 3, 0 }; const size_t minindex = argmin( a ); // Results in 1 \endcode // \n \subsection vector_operations_reduction_operations_argmax argmax() // // The \c argmax() function returns the index of the first largest element of the given dense // vector. This function can only be used for element types that support the smaller-than // relationship. In case the given vector currently has a size of 0, the returned index is 0. \code blaze::DynamicVector<int> a{ 1, -2, 3, 0 }; const size_t maxindex = argmax( a ); // Results in 2 \endcode // \n \section vector_operations_norms Norms // <hr> // // \subsection vector_operations_norms_norm norm() // // The \c norm() function computes the L2 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... 
Resizing and initialization const double norm1 = norm( a ); const double norm2 = norm( b ); \endcode // \n \subsection vector_operations_norms_sqrnorm sqrNorm() // // The \c sqrNorm() function computes the squared L2 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... Resizing and initialization const double norm1 = sqrNorm( a ); const double norm2 = sqrNorm( b ); \endcode // \n \subsection vector_operations_norms_l1norm l1Norm() // // The \c l1Norm() function computes the L1 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... Resizing and initialization const double norm1 = l1Norm( a ); const double norm2 = l1Norm( b ); \endcode // \n \subsection vector_operations_norms_l2norm l2Norm() // // The \c l2Norm() function computes the L2 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... Resizing and initialization const double norm1 = l2Norm( a ); const double norm2 = l2Norm( b ); \endcode // \n \subsection vector_operations_norms_l3norm l3Norm() // // The \c l3Norm() function computes the L3 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... Resizing and initialization const double norm1 = l3Norm( a ); const double norm2 = l3Norm( b ); \endcode // \n \subsection vector_operations_norms_l4norm l4Norm() // // The \c l4Norm() function computes the L4 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... 
Resizing and initialization const double norm1 = l4Norm( a ); const double norm2 = l4Norm( b ); \endcode // \n \subsection vector_operations_norms_lpnorm lpNorm() // // The \c lpNorm() function computes the general Lp norm of the given dense or sparse vector, // where the norm is specified by either a compile time or a runtime argument: \code blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... Resizing and initialization const double norm1 = lpNorm<2>( a ); // Compile time argument const double norm2 = lpNorm( b, 2.3 ); // Runtime argument \endcode // \n \subsection vector_operations_norms_maxnorm linfNorm() / maxNorm() // // The \c linfNorm() and \c maxNorm() functions compute the infinity/maximum norm of the given // dense or sparse vector: \code blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... Resizing and initialization const double norm1 = linfNorm( a ); const double norm2 = maxNorm( b ); \endcode // \n \section vector_operations_scalar_expansion Scalar Expansion // <hr> // // By means of the \c uniform() function it is possible to expand a scalar value into a dense, // uniform vector. By default, the resulting uniform vector is a column vector, but it is possible // to specify the transpose flag explicitly: \code using blaze::columnVector; int scalar = 5; blaze::DynamicVector<int,columnVector> v; // ... Resizing and initialization // Expansion of 'scalar' to a 3-dimensional uniform column vector // // ( 5 ) // ( 5 ) // ( 5 ) // v = uniform( 3UL, scalar ); v = uniform<columnVector>( 3UL, scalar ); \endcode // \n \section vector_operations_vector_expansion Vector Expansion // <hr> // // Via the \c expand() function it is possible to convert a dense or sparse vector into a matrix. // A column vector is expanded into a column-major matrix, a row vector is expanded into a // row-major matrix. 
As demonstrated by the following examples, \c expand() can be used with both // runtime and compile time parameters: \code blaze::DynamicVector<int,columnVector> a{ 1, 2, 3 }; blaze::CompressedVector<int,rowVector> b{ 1, 0, 3, 0, 5 }; // Expand the dense column vector ( 1 2 3 ) into a dense 3x5 column-major matrix // // ( 1 1 1 1 1 ) // ( 2 2 2 2 2 ) // ( 3 3 3 3 3 ) // expand( a, 5 ); // Runtime parameter expand<5>( a ); // Compile time parameter // Expand the sparse row vector ( 1 0 3 0 5 ) into a sparse 3x5 row-major matrix // // ( 1 0 3 0 5 ) // ( 1 0 3 0 5 ) // ( 1 0 3 0 5 ) // expand( b, 3 ); // Runtime parameter expand<3>( b ); // Compile time parameter \endcode // \n \section vector_operations_vector_repetition Vector Repetition // <hr> // // Via the \c repeat() function it is possible to repeat a dense or sparse vector multiple times // to represent a larger vector. Repeating a column vector results in a column vector, repeating // a row vector results in a row vector. As demonstrated by the following examples, \c repeat() // can be used with both runtime and compile time parameters: \code using blaze::columnVector; using blaze::rowVector; blaze::DynamicVector<int,columnVector> a1{ 1, 0, -2 }; blaze::CompressedVector<int,rowVector> b1{ 0, -1, 7 }; blaze::DynamicVector<int,columnVector> a2; blaze::CompressedVector<int,rowVector> b2; // ... 
Resizing and initialization // Repeating the dense column vector ( 1 0 -2 ) three times results in // // ( 1 0 -2 1 0 -2 1 0 -2 ) // a2 = repeat( a1, 3UL ); a2 = repeat<3UL>( a1 ); // Repeating the sparse row vector ( 0 -1 7 ) three times results in // // ( 0 -1 7 0 -1 7 0 -1 7 ) // b2 = repeat( b1, 3UL ); b2 = repeat<3UL>( b1 ); \endcode // \n \section vector_operations_statistic_operations Statistic Operations // <hr> // // \subsection vector_operations_mean mean() // // The <a href="https://en.wikipedia.org/wiki/Arithmetic_mean">(arithmetic) mean</a> of a dense or // sparse vector can be computed via the \c mean() function. In case of a sparse vector, both the // non-zero and zero elements are taken into account. The following example demonstrates the // computation of the mean of a dense vector: \code blaze::DynamicVector<int> v{ 1, 4, 3, 6, 7 }; const double m = mean( v ); // Results in 4.2 (i.e. 21/5) \endcode // In case the size of the given vector is 0, a \c std::invalid_argument is thrown. // // \n \subsection vector_operations_var var() // // The <a href="https://en.wikipedia.org/wiki/Variance">variance</a> of a dense or sparse vector // can be computed via the \c var() function. In case of a sparse vector, both the non-zero and // zero elements are taken into account. The following example demonstrates the computation of // the variance of a dense vector: \code blaze::DynamicVector<int> v{ 1, 4, 3, 6, 7 }; const double x = var( v ); // Results in 5.7 \endcode // In case the size of the given vector is smaller than 2, a \c std::invalid_argument is thrown. // // \n \subsection vector_operations_stddev stddev() // // The <a href="https://en.wikipedia.org/wiki/Standard_deviation">standard deviation</a> of a // dense or sparse vector can be computed via the \c stddev() function. In case of a sparse // vector, both the non-zero and zero elements are taken into account. 
The following example // demonstrates the computation of the standard deviation of a dense vector: \code blaze::DynamicVector<int> v{ 1, 4, 3, 6, 7 }; const double s = stddev( v ); // Results in 2.38747 \endcode // In case the size of the given vector is smaller than 2, a \c std::invalid_argument is thrown. // // // \n \section vector_operations_declaration_operations Declaration Operations // <hr> // // \subsection vector_operations_declzero declzero() // // The \c declzero() operation can be used to explicitly declare any vector or vector expression // as zero vector: \code blaze::DynamicVector<double> a, b; // ... Resizing and initialization b = declzero( a ); \endcode // Any vector or vector expression that has been declared as zero vector via \c declzero() will // gain all the benefits of a zero vector, which range from reduced runtime checking to a // considerable speed-up in computations: \code using blaze::DynamicVector; DynamicVector<double> a, b, c; // ... Resizing and initialization isZero( declzero( a ) ); // Will always return true without runtime effort c = declzero( a ) + b; // Declare the left operand of the vector addition as a // zero vector, i.e. no addition needs to be performed \endcode // \warning The \c declzero() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-zero vector or // vector expression as zero vector via the \c declzero() operation leads to undefined behavior // (which can be violated invariants or wrong computation results)! // // // \n \section vector_operations_vector_generators Vector Generators // <hr> // // \subsection vector_operations_generate generate() // // The \c generate() function returns a dense vector filled elementwise via the given custom // operation. By default, the returned vector is a column vector, but this setting can be changed // via the \c BLAZE_DEFAULT_TRANSPOSE_FLAG switch (see \ref transpose_flag). 
Alternatively it is // possible to specify the transpose flag explicitly.\n // The following example demonstrates the use of the \c generate() function: \code using blaze::generate; using blaze::columnVector; using blaze::rowVector; // Generates the homogeneous integer vector ( 2, 2, 2, 2, 2 ) blaze::DynamicVector<int,columnVector> a; a = generate( 5UL, []( size_t index ){ return 2; } ); // Generates the linearly spaced float vector ( 2.1, 3.2, 4.3, 5.4 ) blaze::DynamicVector<float,columnVector> b; b = generate( 4UL, []( size_t index ){ return 2.1F + 1.1F*index; } ); // Generates the logarithmically spaced double vector ( 1.0, 10.0, 100.0, 1000.0 ) blaze::DynamicVector<double,columnVector> c; c = generate<columnVector>( 4UL, []( size_t index ){ return blaze::exp10( 1.0 + 1.0*index ); } ); // Generates the vector of integer vectors ( ( 1, 2 ), ( 2, 3 ), ( 3, 4 ), ( 4, 5 ) ) using VT = blaze::StaticVector<int,2UL>; blaze::StaticVector<VT,4UL,rowVector> d; d = generate<rowVector>( []( size_t index ) { return evaluate( VT{ 1, 2 } + index ); } ); \endcode // \n \subsection vector_operations_linspace linspace() // // The \c linspace() function returns a dense vector filled with linearly spaced elements. By // default, the returned vector is a column vector, but this setting can be changed via the // \c BLAZE_DEFAULT_TRANSPOSE_FLAG switch (see \ref transpose_flag). 
Alternatively it is possible // to specify the transpose flag explicitly.\n // The following example demonstrates the use of the \c linspace() function: \code using blaze::linspace; using blaze::columnVector; using blaze::rowVector; // Generates the linearly spaced integer vector ( 2, 3, 4, 5, 6 ) blaze::DynamicVector<int,columnVector> a; a = linspace( 5UL, 2, 6 ); // Generates the linearly spaced integer vector ( 6, 5, 4, 3, 2 ) blaze::DynamicVector<int,columnVector> b; b = linspace<columnVector>( 5UL, 6, 2 ); // Generates the linearly spaced float vector ( 2.1, 3.2, 4.3, 5.4 ) blaze::DynamicVector<float,rowVector> c; c = linspace<rowVector>( 4UL, 2.1F, 5.4F ); \endcode // \n \subsection vector_operations_logspace logspace() // // The \c logspace() function returns a dense vector filled with logarithmically spaced elements. // By default, the returned vector is a column vector, but this setting can be changed via the // \c BLAZE_DEFAULT_TRANSPOSE_FLAG switch (see \ref transpose_flag). Alternatively it is possible // to specify the transpose flag explicitly.\n // The following example demonstrates the use of the \c logspace() function: \code using blaze::logspace; using blaze::columnVector; using blaze::rowVector; // Generates the logarithmically spaced double vector ( 1, 10, 100, 1000 ) blaze::DynamicVector<int,columnVector> a; a = logspace( 4UL, 0, 3 ); // Generates the logarithmically spaced double vector ( 1000.0, 100.0, 10.0, 1.0 ) blaze::DynamicVector<double,rowVector> b; b = logspace<rowVector>( 4UL, 3.0, 0.0 ); \endcode // \n \subsection vector_operations_uniform uniform() // // The \c uniform() function creates a uniform vector of the given size. By default, the // resulting uniform vector is a column vector, but this setting can be changed via the // \c BLAZE_DEFAULT_TRANSPOSE_FLAG switch (see \ref transpose_flag). 
Alternatively it is // possible to specify the transpose flag explicitly.\n // The following example demonstrates the use of the \c uniform() function: \code using blaze::uniform; using blaze::columnVector; using blaze::rowVector; // Creates the uniform column vector ( 1, 1, 1, 1, 1 ) auto u1 = uniform( 5UL, 1 ); // Creates the uniform column vector ( 1.2, 1.2, 1.2 ) auto u2 = uniform<columnVector>( 3UL, 1.2 ); // Creates the uniform row vector ( 5U, 5U, 5U, 5U ) auto u3 = uniform<rowVector>( 4UL, 5U ); \endcode // \n \subsection vector_operations_zero zero() // // The \c zero() function creates a zero vector of the given element type and size. By default, // the resulting zero vector is a column vector, but this setting can be changed via the // \c BLAZE_DEFAULT_TRANSPOSE_FLAG switch (see \ref transpose_flag). Alternatively it is // possible to specify the transpose flag explicitly.\n // The following example demonstrates the use of the \c zero() function: \code using blaze::zero; using blaze::columnVector; using blaze::rowVector; // Creates the zero column vector ( 0, 0, 0, 0, 0 ) auto z1 = zero<int>( 5UL ); // Creates the zero column vector ( 0.0, 0.0, 0.0 ) auto z2 = zero<double,columnVector>( 3UL ); // Creates the zero row vector ( 0U, 0U, 0U, 0U ) auto z3 = zero<unsigned int,rowVector>( 4UL ); \endcode // \n Previous: \ref vector_types &nbsp; &nbsp; Next: \ref matrices */ //************************************************************************************************* //**Matrices*************************************************************************************** /*!\page matrices Matrices // // \tableofcontents // // // \n \section matrices_general General Concepts // <hr> // // The \b Blaze library currently offers five dense matrix types (\ref matrix_types_static_matrix, // \ref matrix_types_dynamic_matrix, \ref matrix_types_hybrid_matrix, \ref matrix_types_custom_matrix, // and \ref matrix_types_uniform_matrix) and three sparse matrix types (\ref 
matrix_types_compressed_matrix, // \ref matrix_types_identity_matrix, and \ref matrix_types_zero_matrix). All matrices can either // be stored as row-major matrices or column-major matrices: \code using blaze::DynamicMatrix; using blaze::rowMajor; using blaze::columnMajor; // Setup of the 2x3 row-major dense matrix // // ( 1 2 3 ) // ( 4 5 6 ) // DynamicMatrix<int,rowMajor> A{ { 1, 2, 3 }, { 4, 5, 6 } }; // Setup of the 3x2 column-major dense matrix // // ( 1 4 ) // ( 2 5 ) // ( 3 6 ) // DynamicMatrix<int,columnMajor> B{ { 1, 4 }, { 2, 5 }, { 3, 6 } }; \endcode // Per default, all matrices in \b Blaze are row-major matrices: \code // Instantiation of a 3x3 row-major matrix blaze::DynamicMatrix<int> C( 3UL, 3UL ); \endcode // \n \section matrices_details Matrix Details // <hr> // // - \ref matrix_types // - \ref matrix_operations // // // \n \section matrices_examples Examples // <hr> \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; StaticMatrix<double,6UL,20UL> A; // Instantiation of a 6x20 row-major static matrix CompressedMatrix<double,rowMajor> B; // Instantiation of a row-major compressed matrix DynamicMatrix<double,columnMajor> C; // Instantiation of a column-major dynamic matrix // ... Resizing and initialization C = A * B; \endcode // \n Previous: \ref vector_operations &nbsp; &nbsp; Next: \ref matrix_types */ //************************************************************************************************* //**Matrix Types*********************************************************************************** /*!\page matrix_types Matrix Types // // \tableofcontents // // // \n \section matrix_types_dense_matrices Dense Matrices // <hr> // // \subsection matrix_types_static_matrix StaticMatrix // // The blaze::StaticMatrix class template is the representation of a fixed size matrix with // statically allocated elements of arbitrary type. 
It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/StaticMatrix.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the elements, the number of rows and columns, the storage order of the matrix, // the alignment, the padding, and the group tag of the matrix can be specified via the seven // template parameters: \code namespace blaze { template< typename Type, size_t M, size_t N, bool SO, AlignmentFlag AF, PaddingFlag PF, typename Tag > class StaticMatrix; } // namespace blaze \endcode // - \c Type: specifies the type of the matrix elements. StaticMatrix can be used with any // non-cv-qualified, non-reference element type. // - \c M : specifies the total number of rows of the matrix. // - \c N : specifies the total number of columns of the matrix. Note that it is expected // that StaticMatrix is only used for tiny and small matrices. // - \c SO : specifies the storage order (\c blaze::rowMajor, \c blaze::columnMajor) of the // matrix. The default value is \c blaze::defaultStorageOrder. // - \c AF : specifies whether the first element of every row/column is properly aligned with // respect to the available instruction set (SSE, AVX, ...). Possible values are // \c blaze::aligned and \c blaze::unaligned. The default value is // \c blaze::defaultAlignmentFlag. // - \c PF : specifies whether every row/column of the matrix should be padded to maximize the // efficiency of vectorized operations. Possible values are \c blaze::padded and // \c blaze::unpadded. The default value is \c blaze::defaultPaddingFlag. // - \c Tag : optional type parameter to tag the matrix. The default type is \c blaze::Group0. // See \ref grouping_tagging for details. 
// // The blaze::StaticMatrix is perfectly suited for small to medium matrices whose dimensions are // known at compile time: \code // Definition of a 3x4 integral row-major matrix blaze::StaticMatrix<int,3UL,4UL> A; // Definition of a 4x6 single precision row-major matrix blaze::StaticMatrix<float,4UL,6UL,blaze::rowMajor> B; // Definition of an unaligned, unpadded 6x4 double precision column-major matrix blaze::StaticMatrix<double,6UL,4UL,blaze::columnMajor,blaze::unaligned,blaze::unpadded> C; \endcode // \subsubsection matrix_types_static_matrix_alignment Alignment // // In case \c AF is set to \c blaze::aligned, the elements of a blaze::StaticMatrix are possibly // over-aligned to meet the alignment requirements of the available instruction set (SSE, AVX, // AVX-512, ...). The alignment for fundamental types (\c short, \c int, \c float, \c double, ...) // and complex types (\c complex<float>, \c complex<double>, ...) is 16 bytes for SSE, 32 bytes // for AVX, and 64 bytes for AVX-512. 
All other types are aligned according to their intrinsic // alignment: \code struct Int { int i; }; using MT1 = blaze::StaticMatrix<double,3UL,5UL>; using MT2 = blaze::StaticMatrix<complex<float>,2UL,3UL>; using MT3 = blaze::StaticMatrix<Int,5UL,4UL>; alignof( MT1 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512 alignof( MT2 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512 alignof( MT3 ); // Evaluates to 'alignof( Int )' \endcode // Note that an aligned blaze::StaticMatrix instance may be bigger than the sum of its data // elements: \code sizeof( MT1 ); // Evaluates to 160 for SSE, and 192 for AVX and AVX-512 sizeof( MT2 ); // Evaluates to 64 for SSE and AVX and 128 for AVX-512 sizeof( MT3 ); // Evaluates to 80; no special alignment requirements \endcode // Please note that for this reason a blaze::StaticMatrix cannot be used in containers using // dynamic memory such as \c std::vector without additionally providing an allocator that can // provide over-aligned memory: \code using Type = blaze::StaticMatrix<double,3UL,5UL>; using Allocator = blaze::AlignedAllocator<Type>; std::vector<Type> v1; // Might be misaligned for AVX or AVX-512 std::vector<Type,Allocator> v2; // Properly aligned for AVX or AVX-512 \endcode // \subsubsection matrix_types_static_matrix_padding Padding // // Adding padding elements to the end of every row or column of a blaze::StaticMatrix can have a // significant impact on the performance. For instance, assuming that AVX is available, then two // padded 3x3 matrices of double precision values can be added with three SIMD addition operations: \code using blaze::StaticMatrix; using blaze::rowMajor; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; StaticMatrix<double,3UL,3UL,rowMajor,aligned,padded> A1, B1, C1; StaticMatrix<double,3UL,3UL,rowMajor,unaligned,unpadded> A2, B2, C2; // ... 
Initialization C1 = A1 + B1; // AVX-based matrix addition; maximum performance C2 = A2 + B2; // Scalar matrix addition; limited performance sizeof( A1 ); // Evaluates to 96 for SSE and AVX, and 192 for AVX-512 sizeof( A2 ); // Evaluates to 72 for SSE, AVX, and AVX-512 (minimum size) \endcode // Due to padding, the first addition will run at maximum performance. On the flip side, the size // of each matrix instance is increased due to the padding elements. The total size of an instance // depends on the number of elements and width of the available instruction set (16 bytes for // SSE, 32 bytes for AVX, and 64 bytes for AVX-512). // // The second addition will be limited in performance since due to the number of elements some of // the elements need to be handled in a scalar operation. However, the size of an \c unaligned, // \c unpadded blaze::StaticMatrix instance is guaranteed to be the sum of its elements. // // Please also note that \b Blaze will zero initialize the padding elements in order to achieve // maximum performance! // // // \n \subsection matrix_types_dynamic_matrix DynamicMatrix // // The blaze::DynamicMatrix class template is the representation of an arbitrary sized matrix // with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be included // via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/DynamicMatrix.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the elements, the storage order, the type of the allocator, and the group tag of // the matrix can be specified via the three template parameters: \code namespace blaze { template< typename Type, bool SO, typename Alloc, typename Tag > class DynamicMatrix; } // namespace blaze \endcode // - \c Type : specifies the type of the matrix elements. DynamicMatrix can be used with any // non-cv-qualified, non-reference element type. 
// - \c SO : specifies the storage order (\c blaze::rowMajor, \c blaze::columnMajor) of the // matrix. The default value is \c blaze::defaultStorageOrder. // - \c Alloc: specifies the type of allocator used to allocate dynamic memory. The default type // of allocator is \c blaze::AlignedAllocator. // - \c Tag : optional type parameter to tag the matrix. The default type is \c blaze::Group0. // See \ref grouping_tagging for details. // // The blaze::DynamicMatrix is the default choice for all kinds of dense matrices and the best // choice for medium to large matrices. The number of rows and columns can be modified at runtime: \code // Definition of a 3x4 integral row-major matrix blaze::DynamicMatrix<int> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix blaze::DynamicMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a double precision column-major matrix with 0 rows and columns blaze::DynamicMatrix<double,blaze::columnMajor> C; \endcode // \subsubsection matrix_types_dynamic_matrix_allocators Allocators // // Via the third template parameter it is possible to customize the memory allocation of a // \c blaze::DynamicMatrix. The provided allocator is expected to represent an implementation of // the allocator concept of the standard library (see for instance // <a href="https://en.cppreference.com/w/cpp/container/vector">std::vector</a> and // <a href="https://en.cppreference.com/w/cpp/memory/allocator">std::allocator</a>). In // addition, the provided allocator is also required to provide properly (over-)aligned memory // for fundamental and complex numbers. For instance, in case SSE vectorization is possible, the // returned memory must be at least 16-byte aligned. In case AVX is active, the memory must be at // least 32-byte aligned, and in case of AVX-512 the memory must be even 64-byte aligned. 
// // // \n \subsection matrix_types_hybrid_matrix HybridMatrix // // The HybridMatrix class template combines the flexibility of a dynamically sized matrix with // the efficiency and performance of a fixed size matrix. It is implemented as a crossing between // the blaze::StaticMatrix and the blaze::DynamicMatrix class templates: Similar to the static // matrix it uses static stack memory instead of dynamically allocated memory and similar to the // dynamic matrix it can be resized (within the extend of the static memory). It can be included // via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/HybridMatrix.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the elements, the maximum number of rows and columns, the storage order of the // matrix, the alignment, the padding, and the group tag of the matrix can be specified via the // seven template parameters: \code namespace blaze { template< typename Type, size_t M, size_t N, bool SO, AlignmentFlag AF, PaddingFlag PF, typename Tag > class HybridMatrix; } // namespace blaze \endcode // - \c Type: specifies the type of the matrix elements. HybridMatrix can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c M : specifies the maximum number of rows of the matrix. // - \c N : specifies the maximum number of columns of the matrix. Note that it is expected // that HybridMatrix is only used for tiny and small matrices. // - \c SO : specifies the storage order (\c blaze::rowMajor, \c blaze::columnMajor) of the // matrix. The default value is \c blaze::defaultStorageOrder. // - \c AF : specifies whether the first element of every row/column is properly aligned with // respect to the available instruction set (SSE, AVX, ...). Possible values are // \c blaze::aligned and \c blaze::unaligned. The default value is // \c blaze::defaultAlignmentFlag. 
// - \c PF : specifies whether every row/column of the matrix should be padded to maximize the // efficiency of vectorized operations. Possible values are \c blaze::padded and // \c blaze::unpadded. The default value is \c blaze::defaultPaddingFlag. // - \c Tag : optional type parameter to tag the matrix. The default type is \c blaze::Group0. // See \ref grouping_tagging for details. // // The blaze::HybridMatrix is a suitable choice for small to medium matrices, whose dimensions // are not known at compile time or not fixed at runtime, but whose maximum dimensions are known // at compile time: \code // Definition of a 3x4 integral row-major matrix with maximum dimensions of 6x8 blaze::HybridMatrix<int,6UL,8UL> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix with maximum dimensions of 12x16 blaze::HybridMatrix<float,12UL,16UL,blaze::rowMajor> B( 4UL, 6UL ); // Definition of an unaligned, unpadded 0x0 double precision column-major matrix and maximum dimensions of 6x6 blaze::HybridMatrix<double,6UL,6UL,blaze::columnMajor,blaze::unaligned,blaze::unpadded> C; \endcode // \subsubsection matrix_types_hybrid_matrix_alignment Alignment // // In case \c AF is set to \c blaze::aligned, the elements of a blaze::HybridMatrix are possibly // over-aligned to meet the alignment requirements of the available instruction set (SSE, AVX, // AVX-512, ...). The alignment for fundamental types (\c short, \c int, \c float, \c double, ...) // and complex types (\c complex<float>, \c complex<double>, ...) is 16 bytes for SSE, 32 bytes // for AVX, and 64 bytes for AVX-512. 
All other types are aligned according to their intrinsic // alignment: \code struct Int { int i; }; using MT1 = blaze::HybridMatrix<double,3UL,5UL>; using MT2 = blaze::HybridMatrix<complex<float>,2UL,3UL>; using MT3 = blaze::HybridMatrix<Int,5UL,4UL>; alignof( MT1 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512 alignof( MT2 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512 alignof( MT3 ); // Evaluates to 'alignof( Int )' \endcode // Note that an aligned blaze::HybridMatrix instance may be bigger than an according unaligned // blaze::HybridMatrix: \code sizeof( MT1 ); // Evaluates to 160 for SSE, 224 for AVX, and 256 for AVX-512 sizeof( MT2 ); // Evaluates to 80 for SSE, 96 for AVX, and 192 for AVX-512 sizeof( MT3 ); // Evaluates to 96; no special alignment requirements \endcode // Please note that for this reason a blaze::HybridMatrix cannot be used in containers using // dynamic memory such as \c std::vector without additionally providing an allocator that can // provide over-aligned memory: \code using Type = blaze::HybridMatrix<double,3UL,5UL>; using Allocator = blaze::AlignedAllocator<Type>; std::vector<Type> v1; // Might be misaligned for AVX or AVX-512 std::vector<Type,Allocator> v2; // Properly aligned for AVX or AVX-512 \endcode // \subsubsection matrix_types_hybrid_matrix_padding Padding // // Adding padding elements to the end of every row or column of a blaze::HybridMatrix can have a // significant impact on the performance. For instance, assuming that AVX is available, then two // padded 3x3 matrices of double precision values can be added with three SIMD addition operations: \code using blaze::HybridMatrix; using blaze::rowMajor; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; HybridMatrix<double,3UL,3UL,rowMajor,aligned,padded> A1, B1, C1; HybridMatrix<double,3UL,3UL,rowMajor,unaligned,unpadded> A2, B2, C2; // ... 
Initialization C1 = A1 + B1; // AVX-based matrix addition; maximum performance C2 = A2 + B2; // Scalar matrix addition; limited performance sizeof( A1 ); // Evaluates to 112 for SSE, 128 for AVX, and 256 for AVX-512 sizeof( A2 ); // Evaluates to 88 for SSE, AVX, and AVX-512 (minimum size) \endcode // Due to padding, the first addition will run at maximum performance. On the flip side, the size // of each matrix instance is increased due to the padding elements. The total size of an instance // depends on the number of elements and width of the available instruction set (16 bytes for // SSE, 32 bytes for AVX, and 64 bytes for AVX-512). // // The second addition will be limited in performance since due to the number of elements some of // the elements need to be handled in a scalar operation. However, the size of an \c unaligned, // \c unpadded blaze::HybridMatrix instance is guaranteed to be the sum of its elements plus the. // necessary data members to store the current number of rows and columns. // // Please also note that \b Blaze will zero initialize the padding elements in order to achieve // maximum performance! // // // \n \subsection matrix_types_custom_matrix CustomMatrix // // The blaze::CustomMatrix class template provides the functionality to represent an external // array of elements of arbitrary type and a fixed size as a native \b Blaze dense matrix data // structure. Thus in contrast to all other dense matrix types a custom matrix does not perform // any kind of memory allocation by itself, but it is provided with an existing array of element // during construction. A custom matrix can therefore be considered an alias to the existing // array. 
It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/CustomMatrix.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the elements, the properties of the given array of elements, the storage order, // and the group tag of the matrix can be specified via the following five template parameters: \code namespace blaze { template< typename Type, AlignmentFlag AF, PaddingFlag PF, bool SO, typename Tag > class CustomMatrix; } // namespace blaze \endcode // - \c Type: specifies the type of the matrix elements. blaze::CustomMatrix can be used with // any possibly cv-qualified, non-reference, non-pointer element type. // - \c AF : specifies whether the represented, external arrays are properly aligned with // respect to the available instruction set (SSE, AVX, ...) or not (\c blaze::aligned // or \c blaze::unaligned). // - \c PF : specified whether the represented, external arrays are properly padded with // respect to the available instruction set (SSE, AVX, ...) or not (\c blaze::padded // or \c blaze::unpadded). // - \c SO : specifies the storage order (\c blaze::rowMajor, \c blaze::columnMajor) of the // matrix. The default value is \c blaze::defaultStorageOrder. // - \c Tag : optional type parameter to tag the matrix. The default type is \c blaze::Group0. // See \ref grouping_tagging for details. 
// // The blaze::CustomMatrix is the right choice if any external array needs to be represented as // a \b Blaze dense matrix data structure or if a custom memory allocation strategy needs to be // realized: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of an unmanaged 3x4 custom matrix for unaligned, unpadded integer arrays using UnalignedUnpadded = CustomMatrix<int,unaligned,unpadded,rowMajor>; std::vector<int> vec( 12UL ) UnalignedUnpadded A( &vec[0], 3UL, 4UL ); // Definition of a managed 5x6 custom matrix for unaligned but padded 'float' arrays using UnalignedPadded = CustomMatrix<float,unaligned,padded,columnMajor>; std::unique_ptr<float[]> memory1( new float[40] ); UnalignedPadded B( memory1.get(), 5UL, 6UL, 8UL ); // Definition of a managed 12x13 custom matrix for aligned, unpadded 'double' arrays using AlignedUnpadded = CustomMatrix<double,aligned,unpadded,rowMajor>; std::unique_ptr<double[],Deallocate> memory2( blaze::allocate<double>( 192UL ) ); AlignedUnpadded C( memory2.get(), 12UL, 13UL, 16UL ); // Definition of a 7x14 custom matrix for aligned, padded 'complex<double>' arrays using cplx = complex<double>; using AlignedPadded = CustomMatrix<cplx,aligned,padded,columnMajor>; std::unique_ptr<cplx[],Deallocate> memory3( blaze::allocate<cplx>( 112UL ) ); AlignedPadded D( memory3.get(), 7UL, 14UL, 16UL ); \endcode // In comparison with the remaining \b Blaze dense matrix types blaze::CustomMatrix has several // special characteristics. All of these result from the fact that a custom matrix is not // performing any kind of memory allocation, but instead is given an existing array of elements. 
// The following sections discuss all of these characteristics: // // -# <b>\ref matrix_types_custom_matrix_memory_management</b> // -# <b>\ref matrix_types_custom_matrix_copy_operations</b> // -# <b>\ref matrix_types_custom_matrix_alignment</b> // -# <b>\ref matrix_types_custom_matrix_padding</b> // // \subsubsection matrix_types_custom_matrix_memory_management Memory Management // // The blaze::CustomMatrix class template acts as an adaptor for an existing array of elements. As // such it provides everything that is required to use the array just like a native \b Blaze dense // matrix data structure. However, this flexibility comes with the price that the user of a custom // matrix is responsible for the resource management. // // The following examples give an impression of several possible types of custom matrices: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of a 3x4 custom row-major matrix with unaligned, unpadded and externally // managed integer array. Note that the std::vector must be guaranteed to outlive the // custom matrix! std::vector<int> vec( 12UL ); CustomMatrix<int,unaligned,unpadded> A( &vec[0], 3UL, 4UL ); // Definition of a custom 8x12 matrix for an aligned and padded integer array of // capacity 128 (including 8 padding elements per row). Note that the std::unique_ptr // must be guaranteed to outlive the custom matrix! 
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 128UL ) ); CustomMatrix<int,aligned,padded> B( memory.get(), 8UL, 12UL, 16UL ); \endcode // \subsubsection matrix_types_custom_matrix_copy_operations Copy Operations // // As with all dense matrices it is possible to copy construct a custom matrix: \code using blaze::CustomMatrix; using blaze::unaligned; using blaze::unpadded; using CustomType = CustomMatrix<int,unaligned,unpadded>; std::vector<int> vec( 6UL, 10 ); // Vector of 6 integers of the value 10 CustomType A( &vec[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix a[1] = 20; // Also modifies the std::vector CustomType B( a ); // Creating a copy of vector a b[2] = 20; // Also affects matrix A and the std::vector \endcode // It is important to note that a custom matrix acts as a reference to the specified array. Thus // the result of the copy constructor is a new custom matrix that is referencing and representing // the same array as the original custom matrix. // // In contrast to copy construction, just as with references, copy assignment does not change // which array is referenced by the custom matrices, but modifies the values of the array: \code std::vector<int> vec2( 6UL, 4 ); // Vector of 6 integers of the value 4 CustomType C( &vec2[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix A = C; // Copy assignment: Set all values of matrix A and B to 4. \endcode // \subsubsection matrix_types_custom_matrix_alignment Alignment // // In case the custom matrix is specified as \c aligned the passed array must adhere to some // alignment restrictions based on the alignment requirements of the used data type and the // used instruction set (SSE, AVX, ...). The restriction applies to the first element of each // row/column: In case of a row-major matrix the first element of each row must be properly // aligned, in case of a column-major matrix the first element of each column must be properly // aligned. 
For instance, if a row-major matrix is used and AVX is active the first element of // each row must be 32-byte aligned: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; using blaze::rowMajor; // Allocation of 32-byte aligned memory std::unique_ptr<int[],Deallocate> memory( allocate<int>( 40UL ) ); CustomMatrix<int,aligned,padded,rowMajor> A( memory.get(), 5UL, 6UL, 8UL ); \endcode // In the example, the row-major matrix has six columns. However, since with AVX eight integer // values are loaded together the matrix is padded with two additional elements. This guarantees // that the first element of each row is 32-byte aligned.
However, in case no padding elements are // inserted a scalar addition has to be used: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; using CustomType = CustomMatrix<double,aligned,unpadded>; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 9UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 9UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 9UL ) ); // Creating unpadded custom 3x3 matrix CustomType A( memory1.get(), 3UL, 3UL ); CustomType B( memory2.get(), 3UL, 3UL ); CustomType C( memory3.get(), 3UL, 3UL ); // ... Initialization C = A + B; // Scalar matrix addition \endcode // Note that the construction of padded and unpadded aligned matrices looks identical. However, // in case of padded matrices, \b Blaze will zero initialize the padding element and use them // in all computations in order to achieve maximum performance. In case of an unpadded matrix // \b Blaze will ignore the elements with the downside that it is not possible to load a complete // row to an AVX register, which makes it necessary to fall back to a scalar addition. // // The number of padding elements is required to be sufficient with respect to the available // instruction set: In case of an aligned padded custom matrix the added padding elements must // guarantee that the total number of elements in each row/column is a multiple of the SIMD // vector width. In case of an unaligned padded matrix the number of padding elements can be // greater or equal the number of padding elements of an aligned padded custom matrix. In case // the padding is insufficient with respect to the available instruction set, a // \c std::invalid_argument exception is thrown. // // // \n \subsection matrix_types_uniform_matrix UniformMatrix // // The blaze::UniformMatrix class template is the representation of an arbitrary sized uniform // matrix with elements of arbitrary type. 
It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/UniformMatrix.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the elements, the storage order, and the group tag of the matrix can be specified // via the three template parameters: \code namespace blaze { template< typename Type, bool SO, typename Tag > class UniformMatrix; } // namespace blaze \endcode // - \c Type: specifies the type of the matrix elements. UniformMatrix can be used with any // non-cv-qualified, non-reference element type. // - \c SO : specifies the storage order (\c blaze::rowMajor, \c blaze::columnMajor) of the // matrix. The default value is \c blaze::defaultStorageOrder. // - \c Tag : optional type parameter to tag the matrix. The default type is \c blaze::Group0. // See \ref grouping_tagging for details. // // The blaze::UniformMatrix is the best choice for uniform matrices of any size. The number of // rows and columns can be modified at runtime: \code // Definition of a 3x4 integral row-major matrix blaze::UniformMatrix<int> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix blaze::UniformMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a double precision column-major matrix with 0 rows and columns blaze::UniformMatrix<double,blaze::columnMajor> C; \endcode // \n \section matrix_types_sparse_matrices Sparse Matrices // <hr> // // \subsection matrix_types_compressed_matrix CompressedMatrix // // The blaze::CompressedMatrix class template is the representation of an arbitrary sized sparse // matrix with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type.
It can be // included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/CompressedMatrix.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the elements, the storage order, and the group tag of the matrix can be specified // via the three template parameters: \code namespace blaze { template< typename Type, bool SO, typename Tag > class CompressedMatrix; } // namespace blaze \endcode // - \c Type: specifies the type of the matrix elements. CompressedMatrix can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - \c SO : specifies the storage order (\c blaze::rowMajor, \c blaze::columnMajor) of the // matrix. The default value is \c blaze::defaultStorageOrder. // - \c Tag : optional type parameter to tag the matrix. The default type is \c blaze::Group0. // See \ref grouping_tagging for details. // // The blaze::CompressedMatrix is the right choice for all kinds of sparse matrices: \code // Definition of a 3x4 integral row-major matrix blaze::CompressedMatrix<int> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix blaze::CompressedMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a double precision column-major matrix with 0 rows and columns blaze::CompressedMatrix<double,blaze::columnMajor> C; \endcode // \n \subsection matrix_types_identity_matrix IdentityMatrix // // The blaze::IdentityMatrix class template is the representation of an immutable, arbitrary // sized identity matrix with \f$ N \cdot N \f$ elements of arbitrary type. 
It can be included // via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/IdentityMatrix.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the elements, the storage order, and the group tag of the matrix can be specified // via the three template parameters: \code namespace blaze { template< typename Type, bool SO, typename Tag > class IdentityMatrix; } // namespace blaze \endcode // - \c Type: specifies the type of the matrix elements. IdentityMatrix can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c SO : specifies the storage order (\c blaze::rowMajor, \c blaze::columnMajor) of the // matrix. The default value is \c blaze::defaultStorageOrder. // - \c Tag : optional type parameter to tag the matrix. The default type is \c blaze::Group0. // See \ref grouping_tagging for details. // // The blaze::IdentityMatrix is the perfect choice to represent an identity matrix: \code // Definition of a 3x3 integral row-major identity matrix blaze::IdentityMatrix<int> A( 3UL ); // Definition of a 6x6 single precision row-major identity matrix blaze::IdentityMatrix<float,blaze::rowMajor> B( 6UL ); // Definition of a double precision column-major identity matrix with 0 rows and columns blaze::IdentityMatrix<double,blaze::columnMajor> C; \endcode // \n \subsection matrix_types_zero_matrix ZeroMatrix // // The blaze::ZeroMatrix class template is the representation of an immutable, arbitrary sized // zero matrix with \f$ M \cdot N \f$ elements of arbitrary type.
It can be included via the // header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/ZeroMatrix.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the elements, the storage order, and the group tag of the matrix can be specified // via the three template parameters: \code namespace blaze { template< typename Type, bool SO, typename Tag > class ZeroMatrix; } // namespace blaze \endcode // - \c Type: specifies the type of the matrix elements. ZeroMatrix can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c SO : specifies the storage order (\c blaze::rowMajor, \c blaze::columnMajor) of the // matrix. The default value is \c blaze::defaultStorageOrder. // - \c Tag : optional type parameter to tag the matrix. The default type is \c blaze::Group0. // See \ref grouping_tagging for details. // // The blaze::ZeroMatrix is the perfect choice to represent a zero matrix: \code // Definition of a 3x5 integral row-major zero matrix blaze::ZeroMatrix<int> A( 3UL, 5UL ); // Definition of a 6x4 single precision row-major zero matrix blaze::ZeroMatrix<float,blaze::rowMajor> B( 6UL, 4UL ); // Definition of a double precision column-major zero matrix with 0 rows and columns blaze::ZeroMatrix<double,blaze::columnMajor> C; \endcode // \n Previous: \ref matrices &nbsp; &nbsp; Next: \ref matrix_operations */ //************************************************************************************************* //**Matrix Operations****************************************************************************** /*!\page matrix_operations Matrix Operations // // \tableofcontents // // // \n \section matrix_operations_constructors Constructors // <hr> // // Matrices are just as easy and intuitive to create as vectors. 
Still, there are a few rules // to be aware of: // - In case the last template parameter (the storage order) is omitted, the matrix is per // default stored in row-major order. // - The elements of a \c StaticMatrix or \c HybridMatrix are default initialized (i.e. built-in // data types are initialized to 0, class types are initialized via the default constructor). // - Newly allocated elements of a \c DynamicMatrix or \c CompressedMatrix remain uninitialized // if they are of built-in type and are default constructed if they are of class type. // // \n \subsection matrix_operations_default_construction Default Construction \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using blaze::CompressedMatrix; // All matrices can be default constructed. Whereas the size of // a StaticMatrix is fixed via the second and third template // parameter, the initial size of a constructed DynamicMatrix // or CompressedMatrix is 0. StaticMatrix<int,2UL,2UL> M1; // Instantiation of a 2x2 integer row-major // matrix. All elements are initialized to 0. DynamicMatrix<float> M2; // Instantiation of a single precision dynamic // row-major matrix with 0 rows and 0 columns. DynamicMatrix<double,columnMajor> M3; // Instantiation of a double precision dynamic // column-major matrix with 0 rows and 0 columns. CompressedMatrix<int> M4; // Instantiation of a compressed integer // row-major matrix of size 0x0. CompressedMatrix<double,columnMajor> M5; // Instantiation of a compressed double precision // column-major matrix of size 0x0. \endcode // \n \subsection matrix_operations_size_construction Construction with Specific Size // // The \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix classes offer a constructor // that allows to immediately give the matrices a specific number of rows and columns: \code DynamicMatrix<int> M6( 5UL, 4UL ); // Instantiation of a 5x4 dynamic row-major // matrix. The elements are not initialized. 
HybridMatrix<double,5UL,9UL> M7( 3UL, 7UL ); // Instantiation of a 3x7 hybrid row-major // matrix. The elements are not initialized. CompressedMatrix<float,columnMajor> M8( 8UL, 6UL ); // Instantiation of an empty 8x6 compressed // column-major matrix. \endcode // Note that dense matrices (in this case \c DynamicMatrix and \c HybridMatrix) immediately // allocate enough capacity for all matrix elements. Sparse matrices on the other hand (in this // example \c CompressedMatrix) merely acquire the size, but don't necessarily allocate memory. // // // \n \subsection matrix_operations_initialization_constructors Initialization Constructors // // All dense matrix classes offer a constructor for a direct, homogeneous initialization of all // matrix elements. In contrast, for sparse matrices the predicted number of non-zero elements // can be specified. \code StaticMatrix<int,4UL,3UL,columnMajor> M9( 7 ); // Instantiation of a 4x3 integer column-major // matrix. All elements are initialized to 7. DynamicMatrix<float> M10( 2UL, 5UL, 2.0F ); // Instantiation of a 2x5 single precision row-major // matrix. All elements are initialized to 2.0F. CompressedMatrix<int> M11( 3UL, 4UL, 4 ); // Instantiation of a 3x4 integer row-major // matrix with capacity for 4 non-zero elements. \endcode // \n \subsection matrix_operations_array_construction Array Construction // // Alternatively, all dense matrix classes offer a constructor for an initialization with a dynamic // or static array, or with a \c std::array. If the matrix is initialized from a dynamic array, the // constructor expects the dimensions of values provided by the array as first and second argument, // the array as third argument. In case of a static array or \c std::array, the fixed size of the // array is used: \code const std::unique_ptr<double[]> array1( new double[6] ); // ... 
Initialization of the dynamic array blaze::StaticMatrix<double,2UL,3UL> M12( 2UL, 3UL, array1.get() ); int array2[2][2] = { { 4, -5 }, { -6, 7 } }; blaze::StaticMatrix<int,2UL,2UL,rowMajor> M13( array2 ); const std::array<std::array<float,3UL>,2UL> array3{ { { 1, 2, 3 }, { 4, 5, 6 } } }; blaze::StaticMatrix<int,2UL,3UL> M14( array3 ); \endcode // \n \subsection matrix_operations_initializer_list_construction Initializer List Construction // // In addition, all dense and sparse matrix classes can be directly initialized by means of an // initializer list: \code blaze::DynamicMatrix<float,columnMajor> M15{ { 3.1F, 6.4F }, { -0.9F, -1.2F }, { 4.8F, 0.6F } }; blaze::CompressedMatrix<int,rowMajor> M16{ { 3 }, { 1 }, { 0, 2 } }; \endcode // Dynamically sized matrices (such as e.g. \ref matrix_types_hybrid_matrix, // \ref matrix_types_dynamic_matrix or \ref matrix_types_compressed_matrix) are sized according // to the size of the initializer list and all their elements are (copy) assigned the values of // the list. For fixed size matrices (such as e.g. \ref matrix_types_static_matrix) missing values // are initialized as default and in case the size of the top-level initializer list does not // match the number of rows of the matrix or the size of any nested list exceeds the number of // columns, a \c std::invalid_argument exception is thrown. In case of sparse matrices, only // the non-zero elements are used to initialize the matrix. // // \n \subsection matrix_operations_copy_construction Copy Construction // // All dense and sparse matrices can be created as a copy of another dense or sparse matrix. \code StaticMatrix<int,5UL,4UL,rowMajor> M17( M6 ); // Instantiation of the dense row-major matrix M17 // as copy of the dense row-major matrix M6. DynamicMatrix<float,columnMajor> M18( M8 ); // Instantiation of the dense column-major matrix M18 // as copy of the sparse column-major matrix M8.
CompressedMatrix<double,columnMajor> M19( M7 ); // Instantiation of the compressed column-major matrix // M19 as copy of the dense row-major matrix M7. CompressedMatrix<float,rowMajor> M20( M8 ); // Instantiation of the compressed row-major matrix // M20 as copy of the compressed column-major matrix M8. \endcode // Note that it is not possible to create a \c StaticMatrix as a copy of a matrix with a different // number of rows and/or columns: \code StaticMatrix<int,4UL,5UL,rowMajor> M21( M6 ); // Runtime error: Number of rows and columns // does not match! StaticMatrix<int,4UL,4UL,columnMajor> M22( M9 ); // Compile time error: Number of columns does // not match! \endcode // \n \section matrix_operations_assignment Assignment // <hr> // // There are several types of assignment to dense and sparse matrices: // \ref matrix_operations_homogeneous_assignment, \ref matrix_operations_array_assignment, // \ref matrix_operations_copy_assignment, and \ref matrix_operations_compound_assignment. // // // \n \subsection matrix_operations_homogeneous_assignment Homogeneous Assignment // // It is possible to assign the same value to all elements of a dense matrix.
All dense matrix // classes provide an according assignment operator: \code blaze::StaticMatrix<int,3UL,2UL> M1; blaze::DynamicMatrix<double> M2; // Setting all integer elements of the StaticMatrix to 4 M1 = 4; // Setting all double precision elements of the DynamicMatrix to 3.5 M2 = 3.5; \endcode // \n \subsection matrix_operations_array_assignment Array Assignment // // Dense matrices can also be assigned a static array: \code blaze::StaticMatrix<int,2UL,2UL,rowMajor> M1; blaze::StaticMatrix<int,2UL,2UL,columnMajor> M2; blaze::DynamicMatrix<double> M3; int array1[2][2] = { { 1, 2 }, { 3, 4 } }; double array2[3][2] = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } }; M1 = array1; M2 = array1; M3 = array2; \endcode // Note that the dimensions of the static array have to match the size of a \c StaticMatrix, // whereas a \c DynamicMatrix is resized according to the array dimensions: \f$ M3 = \left(\begin{array}{*{2}{c}} 3.1 & 6.4 \\ -0.9 & -1.2 \\ 4.8 & 0.6 \\ \end{array}\right)\f$ // \n \subsection matrix_operations_initializer_list_assignment Initializer List Assignment // // Alternatively, it is possible to directly assign an initializer list to a dense or sparse // matrix: \code blaze::DynamicMatrix<double> M1; blaze::CompressedMatrix<int> M2; M1 = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } }; M2 = { { 1, 0 }, {}, { 0, 1 }, { 2 } }; \endcode // Dynamically sized matrices (such as e.g. \ref matrix_types_hybrid_matrix, // \ref matrix_types_dynamic_matrix or \ref matrix_types_compressed_matrix) are resized according // to the size of the initializer list and all their elements are (copy) assigned the values of // the list. For fixed size matrices (such as e.g. \ref matrix_types_static_matrix) missing values // are reset to their default value and in case the size of the top-level initializer list does // not match the number of rows of the matrix or the size of any nested list exceeds the number // of columns, a \c std::invalid_argument exception is thrown.
In case of sparse matrices, only // the non-zero elements are considered. // // \n \subsection matrix_operations_copy_assignment Copy Assignment // // All kinds of matrices can be assigned to each other. The only restriction is that since a // \c StaticMatrix cannot change its size, the assigned matrix must match both in the number of // rows and in the number of columns. \code blaze::StaticMatrix<int,3UL,2UL,rowMajor> M1; blaze::DynamicMatrix<int,rowMajor> M2( 3UL, 2UL ); blaze::DynamicMatrix<float,rowMajor> M3( 5UL, 2UL ); blaze::CompressedMatrix<int,rowMajor> M4( 3UL, 2UL ); blaze::CompressedMatrix<float,columnMajor> M5( 3UL, 2UL ); // ... Initialization of the matrices M1 = M2; // OK: Assignment of a 3x2 dense row-major matrix to another 3x2 dense row-major matrix M1 = M4; // OK: Assignment of a 3x2 sparse row-major matrix to a 3x2 dense row-major matrix M1 = M3; // Runtime error: Cannot assign a 5x2 matrix to a 3x2 static matrix M1 = M5; // OK: Assignment of a 3x2 sparse column-major matrix to a 3x2 dense row-major matrix \endcode // \n \subsection matrix_operations_compound_assignment Compound Assignment // // Compound assignment is also available for matrices: addition assignment, subtraction assignment, // and multiplication assignment. In contrast to plain assignment, however, the number of rows // and columns of the two operands have to match according to the arithmetic operation. \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> M1; blaze::DynamicMatrix<int,rowMajor> M2( 2UL, 3UL ); blaze::CompressedMatrix<float,columnMajor> M3( 2UL, 3UL ); blaze::CompressedMatrix<float,rowMajor> M4( 2UL, 4UL ); blaze::StaticMatrix<float,2UL,4UL,rowMajor> M5; blaze::CompressedMatrix<float,rowMajor> M6( 3UL, 2UL ); // ... 
Initialization of the matrices M1 += M2; // OK: Addition assignment between two row-major matrices of the same dimensions M1 -= M3; // OK: Subtraction assignment between a row-major and a column-major matrix M1 += M4; // Runtime error: No compound assignment between matrices of different size M1 -= M5; // Compilation error: No compound assignment between matrices of different size M2 *= M6; // OK: Multiplication assignment between two row-major matrices \endcode // Note that the multiplication assignment potentially changes the number of columns of the // target matrix: \f$\left(\begin{array}{*{3}{c}} 2 & 0 & 1 \\ 0 & 3 & 2 \\ \end{array}\right) \times \left(\begin{array}{*{2}{c}} 4 & 0 \\ 1 & 0 \\ 0 & 3 \\ \end{array}\right) = \left(\begin{array}{*{2}{c}} 8 & 3 \\ 3 & 6 \\ \end{array}\right)\f$ // Since a \c StaticMatrix cannot change its size, only a square StaticMatrix can be used in a // multiplication assignment with other square matrices of the same dimensions. // // // \n \section matrix_operations_element_access Element Access // <hr> // // \subsection matrix_operations_function_call_operator_1 Function Call Operator // // The easiest way to access a specific dense or sparse matrix element is via the function call // operator. The indices to access a matrix are zero-based: \code blaze::DynamicMatrix<int> M1( 4UL, 6UL ); M1(0,0) = 1; M1(0,1) = 3; // ... blaze::CompressedMatrix<double> M2( 5UL, 3UL ); M2(0,2) = 4.1; M2(1,1) = -6.3; \endcode // Since dense matrices allocate enough memory for all contained elements, using the function // call operator on a dense matrix directly returns a reference to the accessed value. In case // of a sparse matrix, if the accessed value is currently not contained in the matrix, the // value is inserted into the matrix prior to returning a reference to the value, which can // be much more expensive than the direct access to a dense matrix.
Consider the following // example: \code blaze::CompressedMatrix<int> M1( 4UL, 4UL ); for( size_t i=0UL; i<M1.rows(); ++i ) { for( size_t j=0UL; j<M1.columns(); ++j ) { ... = M1(i,j); } } \endcode // Although the compressed matrix is only used for read access within the for loop, using the // function call operator temporarily inserts 16 non-zero elements into the matrix. Therefore // the preferred way to traverse the non-zero elements of a sparse matrix is to use iterators. // // \n \subsection matrix_operations_iterators Iterators // // An alternate way to traverse the elements contained in a dense or sparse matrix is by means // of iterators. For that purpose, all matrices provide the \c begin(), \c cbegin(), \c end(), // and \c cend() members functions. Note that it is not possible to traverse all elements of the // matrix, but that it is only possible to traverse elements in a row-wise fashion (in case of // a row-major matrix) or in a column-wise fashion (in case of a column-major matrix). In case of // non-const matrices, \c begin() and \c end() return an \c Iterator, which allows a manipulation // of the (non-zero) value. In case of a constant matrix or in case \c cbegin() or \c cend() are // used a \c ConstIterator is returned. Iterators on dense matrices traverse all elements of the // matrix, including the zero elements. Iterators on sparse matrices only traverse the non-zero // elements. // // The following two examples demonstrate how to traverse the elements of a dense and sparse // matrix, respectively: \code using blaze::DynamicMatrix; using blaze::rowMajor; using blaze::columnMajor; DynamicMatrix<int,rowMajor> M1( 4UL, 6UL ); DynamicMatrix<int,columnMajor> M2( 4UL, 6UL ); // Traversing all elements contained in the row-major matrix by Iterator for( size_t i=0UL; i<M1.rows(); ++i ) { for( DynamicMatrix<int,rowMajor>::Iterator it=M1.begin(i); it!=M1.end(i); ++it ) { *it = ...; // OK: Write access to the value of the element. ... 
= *it; // OK: Read access to the value of the element. } } // Traversing all elements contained in the column-major matrix by ConstIterator for( size_t j=0UL; j<M2.columns(); ++j ) { for( DynamicMatrix<int,columnMajor>::ConstIterator it=M2.cbegin(j); it!=M2.cend(j); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the value of the element. } } \endcode \code using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; CompressedMatrix<int,rowMajor> M3( 4UL, 6UL ); CompressedMatrix<int,columnMajor> M4( 4UL, 6UL ); // Traversing the non-zero elements contained in the row-major matrix by Iterator for( size_t i=0UL; i<M3.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::Iterator it=M3.begin(i); it!=M3.end(i); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } } // Traversing the non-zero elements contained in the column-major matrix by ConstIterator for( size_t j=0UL; j<M4.columns(); ++j ) { for( CompressedMatrix<int,columnMajor>::ConstIterator it=M4.cbegin(j); it!=M4.cend(j); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } } \endcode // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions: \code for( size_t i=0UL; i<M3.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::Iterator it=begin( M3, i ); it!=end( M3, i ); ++it ) { // ... 
} } for( size_t j=0UL; j<M4.columns(); ++j ) { for( CompressedMatrix<int,columnMajor>::ConstIterator it=cbegin( M4, j ); it!=cend( M4, j ); ++it ) { // ... } } \endcode // \n \subsection matrix_operations_data .data() / data() // // Sometimes it is necessary to acquire a pointer to the first element of the underlying array // of a dense matrix. For that purpose the \c data() member function or the free \c data() function // can be used: \code // Instantiating a dynamic matrix with 5 rows and 7 columns blaze::DynamicMatrix<int> A( 5UL, 7UL ); A.data(); // Returns a pointer to the first element of the dynamic matrix data( A ); // Same effect as the member function \endcode // Note that you can NOT assume that all matrix elements lie adjacent to each other! The dense // matrix may use techniques such as padding to improve the alignment of the data. Whereas the // number of elements within a row/column are given by the \ref matrix_operations_rows "rows()" and // \ref matrix_operations_columns "columns()" functions, respectively, the total number of elements including // padding is given by the \ref matrix_operations_spacing "spacing()" function. // // // \n \section matrix_operations_element_insertion Element Insertion // <hr> // // Whereas a dense matrix always provides enough capacity to store all matrix elements, a sparse // matrix only stores the non-zero elements. Therefore it is necessary to explicitly add elements // to the matrix. // // \n \subsection matrix_operations_function_call_operator_2 Function Call Operator // // The first possibility to add elements to a sparse matrix is the function call operator: \code using blaze::CompressedMatrix; CompressedMatrix<int> M1( 3UL, 4UL ); M1(1,2) = 9; \endcode // In case the element at the given position is not yet contained in the sparse matrix, it is // automatically inserted. Otherwise the old value is replaced by the new value 9. The operator // returns a reference to the sparse matrix element.
// // \n \subsection matrix_operations_set .set() // // An alternative to the function call operator is the \c set() function: In case the element is // not yet contained in the matrix the element is inserted, else the element's value is modified: \code // Insert or modify the value at position (2,0) M1.set( 2, 0, 1 ); \endcode // \n \subsection matrix_operations_insert .insert() // The insertion of elements can be better controlled via the \c insert() function. In contrast // to the function call operator and the \c set() function it emits an exception in case the // element is already contained in the matrix. In order to check for this case, the \c find() // function can be used: \code // In case the element at position (2,3) is not yet contained in the matrix it is inserted // with a value of 4. if( M1.find( 2, 3 ) == M1.end( 2 ) ) M1.insert( 2, 3, 4 ); \endcode // \n \subsection matrix_operations_append .append() // // Although the \c insert() function is very flexible, due to performance reasons it is not // suited for the setup of large sparse matrices. A very efficient, yet also very low-level // way to fill a sparse matrix is the \c append() function. It requires the sparse matrix to // provide enough capacity to insert a new element in the specified row/column. Additionally, // the index of the new element must be larger than the index of the previous element in the // same row/column. Violating these conditions results in undefined behavior! \code M1.reserve( 0, 3 ); // Reserving space for three non-zero elements in row 0 M1.append( 0, 1, 2 ); // Appending the element 2 in row 0 at column index 1 M1.append( 0, 2, -4 ); // Appending the element -4 in row 0 at column index 2 // ... 
\endcode // The most efficient way to fill a sparse matrix with elements, however, is a combination of // \c reserve(), \c append(), and the \c finalize() function: \code // Setup of the compressed row-major matrix // // ( 0 1 0 2 0 ) // A = ( 0 0 0 0 0 ) // ( 3 0 0 0 0 ) // blaze::CompressedMatrix<int> M1( 3UL, 5UL ); M1.reserve( 3 ); // Reserving enough space for 3 non-zero elements M1.append( 0, 1, 1 ); // Appending the value 1 in row 0 with column index 1 M1.append( 0, 3, 2 ); // Appending the value 2 in row 0 with column index 3 M1.finalize( 0 ); // Finalizing row 0 M1.finalize( 1 ); // Finalizing the empty row 1 to prepare row 2 M1.append( 2, 0, 3 ); // Appending the value 3 in row 2 with column index 0 M1.finalize( 2 ); // Finalizing row 2 \endcode // \note The \c finalize() function has to be explicitly called for each row or column, even // for empty ones! // \note Although \c append() does not allocate new memory, it still invalidates all iterators // returned by the \c end() functions! // // // \n \section matrix_operations_element_removal Element Removal // <hr> // // \subsection matrix_operations_erase .erase() // // The \c erase() member functions can be used to remove elements from a sparse matrix. The // following example gives an impression of the five different flavors of \c erase(): \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 42, 53 ); // ... 
Initialization of the matrix // Erasing the element at position (21,23) A.erase( 21, 23 ); // Erasing a single element in row 17 via iterator A.erase( 17, A.find( 17, 4 ) ); // Erasing all non-zero elements in the range [7..24] of row 33 A.erase( 33, A.lowerBound( 33, 7 ), A.upperBound( 33, 24 ) ); // Erasing all non-zero elements with a value larger than 9 by passing a unary predicate A.erase( []( int i ){ return i > 9; } ); // Erasing all non-zero elements in the range [30..40] of row 37 with a value larger than 5 CompressedMatrix<int,rowMajor>::Iterator pos1( A.lowerBound( 37, 30 ) ); CompressedMatrix<int,rowMajor>::Iterator pos2( A.upperBound( 37, 40 ) ); A.erase( 37, pos1, pos2, []( int i ){ return i > 5; } ); \endcode // \n \section matrix_operations_element_lookup Element Lookup // <hr> // // A sparse matrix only stores the non-zero elements contained in the matrix. Therefore, whenever // accessing a matrix element at a specific position a lookup operation is required. Whereas the // function call operator is performing this lookup automatically, it is also possible to use the // \c find(), \c lowerBound(), and \c upperBound() member functions for a manual lookup. // // \n \subsection matrix_operations_find .find() / find() // // The \c find() function can be used to check whether a specific element is contained in the // sparse matrix. It specifically searches for the element at the specified position. In case // the element is found, the function returns an iterator to the element. Otherwise an iterator // just past the last non-zero element of the according row or column (the \c end() iterator) // is returned. Note that the returned iterator is subject to invalidation due to inserting // operations via the function call operator, the \c set() function or the \c insert() function! \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 42, 53 ); // ... Initialization of the matrix // Searching the element at position (7,17).
In case the element is not // contained in the matrix, the end() iterator of row 7 is returned. CompressedMatrix<int,rowMajor>::Iterator pos( A.find( 7, 17 ) ); if( pos != A.end( 7 ) ) { // ... } \endcode // Alternatively, the free function \c find() can be used to find a specific element in a sparse // matrix: \code find( A, 7, 17 ); // Searching the element at position (7,17); same effect as the member function \endcode // \n \subsection matrix_operations_lowerbound .lowerBound() / lowerBound() // // In case of a row-major matrix, this function returns a row iterator to the first element with // an index not less than the given column index. In case of a column-major matrix, the function // returns a column iterator to the first element with an index not less than the given row // index. In combination with the \c upperBound() function this function can be used to create a // pair of iterators specifying a range of indices. Note that the returned iterator is subject // to invalidation due to inserting operations via the function call operator, the \c set() // function or the \c insert() function! \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 42, 53 ); // ... Initialization of the matrix // Searching the lower bound of column index 17 in row 7. CompressedMatrix<int,rowMajor>::Iterator pos1( A.lowerBound( 7, 17 ) ); // Searching the upper bound of column index 28 in row 7 CompressedMatrix<int,rowMajor>::Iterator pos2( A.upperBound( 7, 28 ) ); // Erasing all elements in the specified range A.erase( 7, pos1, pos2 ); \endcode // Alternatively, the free function \c lowerBound() can be used to: \code lowerBound( A, 7, 17 ); // Searching the lower bound of (7,17); same effect as the member function \endcode // \n \subsection matrix_operations_upperbound .upperBound() / upperBound() // // In case of a row-major matrix, this function returns a row iterator to the first element with // an index greater than the given column index.
In case of a column-major matrix, the function // returns a column iterator to the first element with an index greater than the given row // index.
has the same effect as the member function rows( M1 * M2 ); // Returns 10, i.e. the number of rows of the resulting matrix \endcode // \n \subsection matrix_operations_columns .columns() / columns() // // The current number of columns of a matrix can be acquired via the \c columns() member function: \code // Instantiating a dynamic matrix with 6 rows and 8 columns blaze::DynamicMatrix<int> M1( 6UL, 8UL ); M1.columns(); // Returns 8 // Instantiating a compressed matrix with 8 rows and 7 columns blaze::CompressedMatrix<double> M2( 8UL, 7UL ); M2.columns(); // Returns 7 \endcode // There is also a free function \c columns() available, which can also be used to query the number // of columns of a matrix expression: \code columns( M1 ); // Returns 8, i.e. has the same effect as the member function columns( M2 ); // Returns 7, i.e. has the same effect as the member function columns( M1 * M2 ); // Returns 7, i.e. the number of columns of the resulting matrix \endcode // \subsection matrix_operations_size size() // // The \c size() function returns the total number of elements of a matrix: \code // Instantiating a dynamic matrix with 6 rows and 8 columns blaze::DynamicMatrix<int> M1( 6UL, 8UL ); size( M1 ); // Returns 48 // Instantiating a compressed matrix with 8 rows and 7 columns blaze::CompressedMatrix<double> M2( 8UL, 7UL ); size( M2 ); // Returns 56 \endcode // \subsection matrix_operations_spacing .spacing() / spacing() // // The total number of elements of a row or column of a dense matrix, including potential padding // elements, can be acquired via the \c spacing member function. In case of a row-major matrix // (i.e. in case the storage order is set to blaze::rowMajor) the function returns the spacing // between two rows, in case of a column-major matrix (i.e. 
in case the storage flag is set to // blaze::columnMajor) the function returns the spacing between two columns: \code // Instantiating a row-major dynamic matrix with 7 rows and 8 columns blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 8UL ); M1.spacing(); // Returns the total number of elements in a row // Instantiating a column-major dynamic matrix with 8 rows and 12 columns blaze::CompressedMatrix<double> M2( 8UL, 12UL ); M2.spacing(); // Returns the total number of element in a column \endcode // Alternatively, the free functions \c spacing() can be used to query the current number of // elements in a row/column. \code spacing( M1 ); // Returns the total number of elements in a row spacing( M2 ); // Returns the total number of elements in a column \endcode // \n \subsection matrix_operations_capacity .capacity() / capacity() // // The \c capacity() member function returns the internal capacity of a dense or sparse matrix. // Note that the capacity of a matrix doesn't have to be equal to the size of a matrix. In case of // a dense matrix the capacity will always be greater or equal than the total number of elements // of the matrix. In case of a sparse matrix, the capacity will usually be much less than the // total number of elements. \code blaze::DynamicMatrix<float> M1( 5UL, 7UL ); blaze::StaticMatrix<float,7UL,4UL> M2; M1.capacity(); // Returns at least 35 M2.capacity(); // Returns at least 28 \endcode // There is also a free function \c capacity() available to query the capacity. However, please // note that this function cannot be used to query the capacity of a matrix expression: \code capacity( M1 ); // Returns at least 35, i.e. has the same effect as the member function capacity( M2 ); // Returns at least 28, i.e. has the same effect as the member function capacity( M1 * M2 ); // Compilation error! 
\endcode // \n \subsection matrix_operations_nonzeros .nonZeros() / nonZeros() // // For both dense and sparse matrices the current number of non-zero elements can be queried // via the \c nonZeros() member function. In case of matrices there are two flavors of the // \c nonZeros() function: One returns the total number of non-zero elements in the matrix, // the second returns the number of non-zero elements in a specific row (in case of a row-major // matrix) or column (in case of a column-major matrix). Sparse matrices directly return their // number of non-zero elements, dense matrices traverse their elements and count the number of // non-zero elements. \code blaze::DynamicMatrix<int,rowMajor> M1( 3UL, 5UL ); // ... Initializing the dense matrix M1.nonZeros(); // Returns the total number of non-zero elements in the dense matrix M1.nonZeros( 2 ); // Returns the number of non-zero elements in row 2 \endcode \code blaze::CompressedMatrix<double,columnMajor> M2( 4UL, 7UL ); // ... Initializing the sparse matrix M2.nonZeros(); // Returns the total number of non-zero elements in the sparse matrix M2.nonZeros( 3 ); // Returns the number of non-zero elements in column 3 \endcode // The free \c nonZeros() function can also be used to query the number of non-zero elements in a // matrix expression. 
However, the result is not the exact number of non-zero elements, but may be // a rough estimation: \code nonZeros( M1 ); // Has the same effect as the member function nonZeros( M1, 2 ); // Has the same effect as the member function nonZeros( M2 ); // Has the same effect as the member function nonZeros( M2, 3 ); // Has the same effect as the member function nonZeros( M1 * M2 ); // Estimates the number of non-zero elements in the matrix expression \endcode // \n \subsection matrix_operations_isempty isEmpty() // // The \c isEmpty() function returns whether the total number of elements of the matrix is zero: \code blaze::DynamicMatrix<int> A; // Create an empty matrix isEmpty( A ); // Returns true A.resize( 5, 0 ); // Resize to a 5x0 matrix isEmpty( A ); // Returns true A.resize( 5, 3 ); // Resize to a 5x3 matrix isEmpty( A ); // Returns false \endcode // \n \subsection matrix_operations_isnan isnan() // // The \c isnan() function provides the means to check a dense or sparse matrix for non-a-number // elements: \code blaze::DynamicMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isnan( A ) ) { ... } \endcode \code blaze::CompressedMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isnan( A ) ) { ... } \endcode // If at least one element of the matrix is not-a-number, the function returns \c true, otherwise // it returns \c false. // // // \n \subsection matrix_operations_isinf isinf() // // The \c isinf() function checks the given dense or sparse matrix for infinite (\c inf) elements: \code blaze::DynamicMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isinf( A ) ) { ... } \endcode \code blaze::CompressedMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isinf( A ) ) { ... } \endcode // If at least one element of the matrix is infinite, the function returns \c true, otherwise it // returns \c false. 
// // // \n \subsection matrix_operations_isfinite isfinite() // // The \c isfinite() function checks if all elements of the given dense or sparse matrix are // finite elements (i.e. normal, subnormal or zero elements, but not infinite or NaN): \code blaze::DynamicMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isfinite( A ) ) { ... } \endcode \code blaze::CompressedMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isfinite( A ) ) { ... } \endcode // If all elements of the matrix are finite, the function returns \c true, otherwise it returns // \c false. // // // \n \subsection matrix_operations_isdefault isDefault() // // The \c isDefault() function returns whether the given dense or sparse matrix is in default state: \code blaze::HybridMatrix<int,5UL,4UL> A; // ... Resizing and initialization if( isDefault( A ) ) { ... } \endcode // A matrix is in default state if it appears to just have been default constructed. All resizable // matrices (\c HybridMatrix, \c DynamicMatrix, or \c CompressedMatrix) and \c CustomMatrix are in // default state if its size is equal to zero. A non-resizable matrix (\c StaticMatrix and all // submatrices) is in default state if all its elements are in default state. For instance, in case // the matrix is instantiated for a built-in integral or floating point data type, the function // returns \c true in case all matrix elements are 0 and \c false in case any matrix element is // not 0. // // // \n \subsection matrix_operations_isSquare isSquare() // // Whether a dense or sparse matrix is a square matrix (i.e. if the number of rows is equal to the // number of columns) can be checked via the \c isSquare() function: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization if( isSquare( A ) ) { ... 
} \endcode // \n \subsection matrix_operations_issymmetric isSymmetric() // // Via the \c isSymmetric() function it is possible to check whether a dense or sparse matrix // is symmetric: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isSymmetric( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be symmetric! // // // \n \subsection matrix_operations_isUniform isUniform() // // In order to check if all matrix elements are identical, the \c isUniform() function can be used: \code blaze::DynamicMatrix<int> A; // ... Resizing and initialization if( isUniform( A ) ) { ... } \endcode // Note that in case of a sparse matrix also the zero elements are also taken into account! // // // \n \subsection matrix_operations_isZero isZero() // // In order to check if all matrix elements are zero, the \c isZero() function can be used: \code blaze::DynamicMatrix<int> A; // ... Resizing and initialization if( isZero( A ) ) { ... } \endcode // \n \subsection matrix_operations_islower isLower() // // Via the \c isLower() function it is possible to check whether a dense or sparse matrix is // lower triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be lower triangular! // // // \n \subsection matrix_operations_isunilower isUniLower() // // Via the \c isUniLower() function it is possible to check whether a dense or sparse matrix is // lower unitriangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUniLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be lower unitriangular! // // // \n \subsection matrix_operations_isstrictlylower isStrictlyLower() // // Via the \c isStrictlyLower() function it is possible to check whether a dense or sparse matrix // is strictly lower triangular: \code blaze::DynamicMatrix<float> A; // ... 
Resizing and initialization if( isStrictlyLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be strictly lower triangular! // // // \n \subsection matrix_operations_isUpper isUpper() // // Via the \c isUpper() function it is possible to check whether a dense or sparse matrix is // upper triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be upper triangular! // // // \n \subsection matrix_operations_isuniupper isUniUpper() // // Via the \c isUniUpper() function it is possible to check whether a dense or sparse matrix is // upper unitriangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUniUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be upper unitriangular! // // // \n \subsection matrix_operations_isstrictlyupper isStrictlyUpper() // // Via the \c isStrictlyUpper() function it is possible to check whether a dense or sparse matrix // is strictly upper triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isStrictlyUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be strictly upper triangular! // // // \n \subsection matrix_operations_isdiagonal isDiagonal() // // The \c isDiagonal() function checks if the given dense or sparse matrix is a diagonal matrix, // i.e. if it has only elements on its diagonal and if the non-diagonal elements are default // elements: \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization if( isDiagonal( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be diagonal! // // // \n \subsection matrix_operations_isidentity isIdentity() // // The \c isIdentity() function checks if the given dense or sparse matrix is an identity matrix, // i.e. 
if all diagonal elements are 1 and all non-diagonal elements are 0: \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization if( isIdentity( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be identity matrices! // // // \n \subsection matrix_operations_ispositivedefinite isPositiveDefinite() // // The \c isPositiveDefinite() function checks if the given dense matrix is positive definite. \code blaze::DynamicMatrix<double> A; // ... Initialization if( isPositiveDefinite( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be positive definite! // // \note The \c isPositiveDefinite() function can only be used for dense matrices with \c float, // \c double, \c complex<float> or \c complex<double> element type. The attempt to call the // function with matrices of any other element type or with a sparse matrix results in a compile // time error! // // \note The function is depending on LAPACK kernels. Thus the function can only be used if a // fitting LAPACK library is available and linked to the executable. Otherwise a linker error // will be created. // // // \n \subsection matrix_operations_matrix_trans trans() // // Matrices can be transposed via the \c trans() function. Row-major matrices are transposed into // a column-major matrix and vice versa: \code blaze::DynamicMatrix<int,rowMajor> M1( 5UL, 2UL ); blaze::CompressedMatrix<int,columnMajor> M2( 3UL, 7UL ); M1 = M2; // Assigning a column-major matrix to a row-major matrix M1 = trans( M2 ); // Assigning the transpose of M2 (i.e. 
a row-major matrix) to M1 M1 += trans( M2 ); // Addition assignment of two row-major matrices \endcode // \n \subsection matrix_operations_ctrans ctrans() // // The conjugate transpose of a dense or sparse matrix (also called adjoint matrix, Hermitian // conjugate, or transjugate) can be computed via the \c ctrans() function: \code blaze::DynamicMatrix< complex<float>, rowMajor > M1( 5UL, 2UL ); blaze::CompressedMatrix< complex<float>, columnMajor > M2( 2UL, 5UL ); M1 = ctrans( M2 ); // Compute the conjugate transpose matrix \endcode // Note that the \c ctrans() function has the same effect as manually applying the \c conj() and // \c trans() function in any order: \code M1 = trans( conj( M2 ) ); // Computing the conjugate transpose matrix M1 = conj( trans( M2 ) ); // Computing the conjugate transpose matrix \endcode // \n \subsection matrix_operations_reverse reverse() // // Via the \c reverse() function is is possible to reverse the rows or columns of a dense or sparse // matrix. The following examples gives an impression of both alternatives: \code blaze::DynamicMatrix<int,rowMajor> A{ { 1, 0, 2, 3 }, { 2, 4, 0, 1 }, { 0, 3, 1, 0 } }; blaze::DynamicMatrix<int> B; // Reversing the rows result in the matrix // // ( 0 3 1 0 ) // ( 2 4 0 1 ) // ( 1 0 2 3 ) // B = reverse<rowwise>( A ); // Reversing the columns result in the matrix // // ( 3 2 0 1 ) // ( 1 0 4 2 ) // ( 0 1 3 0 ) // B = reverse<columnwise>( A ); \endcode // \n \subsection matrix_operations_evaluate eval() / evaluate() // // The \c evaluate() function forces an evaluation of the given matrix expression and enables // an automatic deduction of the correct result type of an operation. The following code example // demonstrates its intended use for the multiplication of a lower and a strictly lower dense // matrix: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::StrictlyLowerMatrix; LowerMatrix< DynamicMatrix<double> > A; StrictlyLowerMatrix< DynamicMatrix<double> > B; // ... 
Resizing and initialization auto C = evaluate( A * B ); \endcode // In this scenario, the \c evaluate() function assists in deducing the exact result type of // the operation via the \c auto keyword. Please note that if \c evaluate() is used in this // way, no temporary matrix is created and no copy operation is performed. Instead, the result // is directly written to the target matrix due to the return value optimization (RVO). However, // if \c evaluate() is used in combination with an explicit target type, a temporary will be // created and a copy operation will be performed if the used type differs from the type // returned from the function: \code StrictlyLowerMatrix< DynamicMatrix<double> > D( A * B ); // No temporary & no copy operation LowerMatrix< DynamicMatrix<double> > E( A * B ); // Temporary & copy operation DynamicMatrix<double> F( A * B ); // Temporary & copy operation D = evaluate( A * B ); // Temporary & copy operation \endcode // Sometimes it might be desirable to explicitly evaluate a sub-expression within a larger // expression. However, please note that \c evaluate() is not intended to be used for this // purpose. This task is more elegantly and efficiently handled by the \c eval() function: \code blaze::DynamicMatrix<double> A, B, C, D; D = A + evaluate( B * C ); // Unnecessary creation of a temporary matrix D = A + eval( B * C ); // No creation of a temporary matrix \endcode // In contrast to the \c evaluate() function, \c eval() can take the complete expression // into account and therefore can guarantee the most efficient way to evaluate it (see also // \ref intra_statement_optimization). // // \n \subsection matrix_operations_noalias noalias() // // The \b Blaze library is able to reliably detect aliasing during the assignment of matrices. // In case the aliasing would lead to an incorrect result, \b Blaze introduces an intermediate // temporary of the appropriate type to break the aliasing. 
For instance, in the following // example \b Blaze performs an alias detection in both assignments, but only, in the second // assignment it detects a problematic aliasing and uses an intermediate temporary in order // to be able to compute the correct result: \code blaze::DynamicMatrix<double> A, B; A = A + B; // No problematic aliasing of A, no intermediate temporary is required. A = A * B; // Problematic aliasing of A; intermediate temporary required! \endcode // The detection of aliasing effects, however, takes a small runtime effort. In order to disable // the aliasing detection, the \c noalias() function can be used: \code blaze::DynamicMatrix<double> A, B; A = noalias( A + B ); // No alias detection performed, no intermediate temporary. A = noalias( A * B ); // No alias detection performed, no intermediate temporary. // Note that the final result will be incorrect! \endcode // \warning The \c noalias() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Using \c noalias() in a situation // where an aliasing effect occurs leads to undefined behavior (which can be violated invariants // or wrong computation results)! // // \n \subsection matrix_operations_nosimd nosimd() // // By default, \b Blaze attempts to vectorize all operations by means of SSE, AVX, etc. in order // to achieve maximum performance. However, via the \c nosimd() operation it is possible to disable // the SIMD evaluation of any operation: \code blaze::DynamicMatrix<double> A, B; A = nosimd( A + B ); // Disables SIMD for the matrix/matrix addition A = nosimd( A * B ); // Disables SIMD for the matrix/matrix multiplication \endcode // Please note that the main purpose of the \c nosimd() operation is to enable an easy performance // comparison between the vectorized and non-vectorized evaluation. Using the \c nosimd() operation // will likely result in significantly reduced performance! 
// // // \n \section matrix_operations_modifying_operations Modifying Operations // <hr> // // \subsection matrix_operations_resize_reserve .resize() / .reserve() // // The dimensions of a \c StaticMatrix are fixed at compile time by the second and third template // parameter and a \c CustomMatrix cannot be resized. In contrast, the number or rows and columns // of \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix can be changed at runtime: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<int,rowMajor> M1; CompressedMatrix<int,columnMajor> M2( 3UL, 2UL ); // Adapting the number of rows and columns via the resize() function. The (optional) // third parameter specifies whether the existing elements should be preserved. Per // default, the existing elements are preserved. M1.resize( 2UL, 2UL ); // Resizing matrix M1 to 2x2 elements. Elements of built-in type // remain uninitialized, elements of class type are default // constructed. M1.resize( 3UL, 1UL, false ); // Resizing M1 to 3x1 elements. The old elements are lost, the // new elements are NOT initialized! M2.resize( 5UL, 7UL, true ); // Resizing M2 to 5x7 elements. The old elements are preserved. M2.resize( 3UL, 2UL, false ); // Resizing M2 to 3x2 elements. The old elements are lost. \endcode // Note that resizing a matrix invalidates all existing views (see e.g. \ref views_submatrices) // on the matrix: \code blaze::DynamicMatrix<int,rowMajor> M1( 10UL, 20UL ); // Creating a 10x20 matrix auto row8 = row( M1, 8UL ); // Creating a view on the 8th row of the matrix M1.resize( 6UL, 20UL ); // Resizing the matrix invalidates the view \endcode // When the internal capacity of a matrix is no longer sufficient, the allocation of a larger // junk of memory is triggered. 
In order to avoid frequent reallocations, the \c reserve() // function can be used up front to set the internal capacity: \code blaze::DynamicMatrix<int> M1; M1.reserve( 100 ); M1.rows(); // Returns 0 M1.capacity(); // Returns at least 100 \endcode // Additionally it is possible to reserve memory in a specific row (for a row-major matrix) or // column (for a column-major matrix): \code blaze::CompressedMatrix<int> M1( 4UL, 6UL ); M1.reserve( 1, 4 ); // Reserving enough space for four non-zero elements in row 1 \endcode // \n \subsection matrix_operations_shrinkToFit .shrinkToFit() // // The internal capacity of matrices with dynamic memory is preserved in order to minimize the // number of reallocations. For that reason, the \c resize() and \c reserve() functions can lead // to memory overhead. The \c shrinkToFit() member function can be used to minimize the internal // capacity: \code blaze::DynamicMatrix<int> M1( 100UL, 100UL ); // Create a 100x100 integer matrix M1.resize( 10UL, 10UL ); // Resize to 10x10, but the capacity is preserved M1.shrinkToFit(); // Remove the unused capacity \endcode // Please note that due to padding the capacity might not be reduced exactly to \c rows() times // \c columns(). Please also note that in case a reallocation occurs, all iterators (including // \c end() iterators), all pointers and references to elements of this matrix are invalidated. // // // \subsection matrix_operations_reset_clear reset() / clear // // In order to reset all elements of a dense or sparse matrix, the \c reset() function can be // used. The number of rows and columns of the matrix are preserved: \code // Setting up a single precision row-major matrix, whose elements are initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); // Resetting all elements to 0.0F. 
reset( M1 ); // Resetting all elements M1.rows(); // Returns 4: size and capacity remain unchanged \endcode // Alternatively, only a single row or column of the matrix can be resetted: \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 6UL, 5 ); // Setup of a row-major matrix blaze::DynamicMatrix<int,blaze::columnMajor> M2( 4UL, 5UL, 4 ); // Setup of a column-major matrix reset( M1, 2UL ); // Resetting the 2nd row of the row-major matrix reset( M2, 3UL ); // Resetting the 3rd column of the column-major matrix \endcode // In order to reset a row of a column-major matrix or a column of a row-major matrix, use a // row or column view (see \ref views_rows and views_colums). // // In order to return a matrix to its default state (i.e. the state of a default constructed // matrix), the \c clear() function can be used: \code // Setting up a single precision row-major matrix, whose elements are initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); // Resetting all elements to 0.0F. clear( M1 ); // Resetting the entire matrix M1.rows(); // Returns 0: size is reset, but capacity remains unchanged \endcode // \n \subsection matrix_operations_matrix_transpose transpose() // // In addition to the non-modifying \c trans() function, matrices can be transposed in-place via // the \c transpose() function: \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); transpose( M ); // In-place transpose operation. M = trans( M ); // Same as above \endcode // Note however that the transpose operation fails if ... // // - ... the given matrix has a fixed size and is non-square; // - ... the given matrix is a triangular matrix; // - ... the given submatrix affects the restricted parts of a triangular matrix; // - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix. 
// // // \n \subsection matrix_operations_ctranspose ctranspose() // // The \c ctranspose() function can be used to perform an in-place conjugate transpose operation: \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); ctranspose( M ); // In-place conjugate transpose operation. M = ctrans( M ); // Same as above \endcode // Note however that the conjugate transpose operation fails if ... // // - ... the given matrix has a fixed size and is non-square; // - ... the given matrix is a triangular matrix; // - ... the given submatrix affects the restricted parts of a triangular matrix; // - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix. // // // \n \subsection matrix_operations_swap swap() // // Via the \c \c swap() function it is possible to completely swap the contents of two matrices // of the same type: \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 10UL, 15UL ); blaze::DynamicMatrix<int,blaze::rowMajor> M2( 20UL, 10UL ); swap( M1, M2 ); // Swapping the contents of M1 and M2 \endcode // \n \section matrix_operations_arithmetic_operations Arithmetic Operations // <hr> // // \subsection matrix_operations_min_max min() / max() // // The \c min() and \c max() functions can be used for a single matrix, multiple matrices, and // a matrix and a scalar. // // <b>Single Matrix</b> // // If passed a single matrix, the functions return the smallest and largest element of the given // dense matrix or the smallest and largest non-zero element of the given sparse matrix, // respectively: \code blaze::StaticMatrix<int,2UL,3UL> A{ { -5, 2, 7 }, { -4, 0, 1 } }; min( A ); // Returns -5 max( A ); // Returns 7 \endcode \code blaze::CompressedMatrix<int> B{ { 1, 0, 3 }, { 0, 0, 0 } }; min( B ); // Returns 1 max( B ); // Returns 3 \endcode // For more information on the unary \c min() and \c max() reduction operations see the // \ref matrix_operations_reduction_operations section. 
// // <b>Multiple Matrices</b> // // If passed two or more dense matrices, the \c min() and \c max() functions compute the // componentwise minimum or maximum of the given matrices, respectively: \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> C{ { -5, 1, -7 }, { 4, 1, 0 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> D{ { -5, 3, 0 }, { 2, 2, -2 } }; min( A, C ); // Results in the matrix ( -5, 1, -7 ) ( -4, 0, 0 ) max( A, C, D ); // Results in the matrix ( -5, 3, 7 ) ( 4, 2, 1 ) \endcode // Please note that sparse matrices can only be used in the unary \c min() and \c max() functions. // Also note that all forms of the \c min() and \c max() functions can be used to compute the // smallest and largest element of a matrix expression: \code min( A + B + C ); // Returns -9, i.e. the smallest value of the resulting matrix max( A - B - C ); // Returns 11, i.e. the largest value of the resulting matrix \endcode // <b>Matrix and Scalar</b> // // If passed a dense matrix and a scalar, the \c min() and \c max() functions compute the // componentwise minimum or maximum between the given matrix and a uniform matrix represented by // the scalar value: \code min( A, 0 ); // Results in the matrix ( 0, 2, 7 ) ( 0, 0, 1 ) min( 0, A ); // Results in the matrix ( 0, 2, 7 ) ( 0, 0, 1 ) max( A, 0 ); // Results in the matrix ( -5, 0, 0 ) ( -4, 0, 0 ) max( 0, A ); // Results in the matrix ( -5, 0, 0 ) ( -4, 0, 0 ) \endcode // \n \subsection matrix_operators_softmax softmax() // // The <a href="https://en.wikipedia.org/wiki/Softmax_function">softmax function</a>, also called // the normalized exponential function, of a given dense matrix can be computed via \c softmax(). // The resulting dense matrix consists of real values in the range (0..1], which add up to 1. 
\code blaze::StaticMatrix<double,3UL,3UL> A{ { 1.0, 2.0, 3.0 } , { 4.0, 1.0, 2.0 } , { 3.0, 4.0, 1.0 } }; blaze::StaticMatrix<double,3UL,3UL> B; // Evaluating the softmax function B = softmax( A ); // Results in ( 0.0157764 0.0428847 0.116573 ) // ( 0.316878 0.0157764 0.0428847 ) // ( 0.116573 0.316878 0.0157764 ) double b = sum( B ); // Results in 1 \endcode // Alternatively it is possible to compute a row- or columnwise \c softmax() function. The // resulting dense matrix consists of real values in the range (0..1], which add up to the number // of rows or columns, respectively. \code using blaze::rowwise; using blaze::columnwise; blaze::StaticMatrix<double,3UL,3UL> C, D; // Evaluating the rowwise softmax function C = softmax<rowwise>( A ); // Results in ( 0.0900306 0.244728 0.665241 ) // ( 0.843795 0.0420101 0.114195 ) // ( 0.259496 0.705385 0.035119 ) double c = sum( C ); // Results in 3 (the number of rows of A) // Evaluating the columnwise softmax function D = softmax<columnwise>( A ); // Results in ( 0.035119 0.114195 0.665241 ) // ( 0.705385 0.0420101 0.244728 ) // ( 0.259496 0.843795 0.0900306 ) double d = sum( D ); // Results in 3 (the number of columns of A) \endcode // \n \subsection matrix_operators_trace trace() // // The \c trace() function sums the diagonal elements of a square dense or sparse matrix: \code blaze::StaticMatrix<int,3UL,3UL> A{ { -1, 2, -3 } , { -4, -5, 6 } , { 7, -8, -9 } }; trace( A ); // Returns the sum of the diagonal elements, i.e. -15 \endcode // In case the given matrix is not a square matrix, a \c std::invalid_argument exception is // thrown. // // // \n \subsection matrix_operations_matrix_determinant det() // // The determinant of a square dense matrix can be computed by means of the \c det() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization double d = det( A ); // Compute the determinant of A \endcode // In case the given dense matrix is not a square matrix, a \c std::invalid_argument exception is // thrown. // // \note The \c det() function can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The function is depending on LAPACK kernels. Thus the function can only be used if a // fitting LAPACK library is available and linked to the executable. Otherwise a linker error // will be created. // // // \n \subsection matrix_operators_rank rank() // // The \c rank() function computes the rank of a given dense matrix: \code blaze::DynamicMatrix<double> A( 5UL, 8UL ); // ... Initialization rank( A ); \endcode // The rank is determined as the number of singular values greater than a given tolerance. This // tolerance is computed as \code tolerance = max(m,n) * max(s) * epsilon, \endcode // where \c m is the number of rows of the dense matrix, \c n is the number of columns of the // dense matrix, \c max(s) is the maximum singular value of the dense matrix and \c epsilon is // the difference between 1 and the least value greater than 1 that is representable by the // floating point type of the singular values. // // \note The \c rank() function can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The function is depending on LAPACK kernels. Thus the function can only be used if a // fitting LAPACK library is available and linked to the executable. Otherwise a linker error // will be created. 
// // // \n \subsection matrix_operators_abs abs() // // The \c abs() function can be used to compute the absolute values of each element of a matrix. // For instance, the following computation \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, -3 }, { 4, -5, 6 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( abs( A ) ); \endcode // results in the matrix \f$ B = \left(\begin{array}{*{3}{c}} 1 & 2 & 3 \\ 4 & 5 & 6 \\ \end{array}\right)\f$ // \n \subsection matrix_operators_sign sign() // // The \c sign() function can be used to evaluate the sign of each element of a matrix \a A. For // each element \c (i,j) the corresponding result is 1 if \a A(i,j) is greater than zero, 0 if // \a A(i,j) is zero, and -1 if \a A(i,j) is less than zero. For instance, the following use of // the \c sign() function \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, 0 }, { 4, 0, -6 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( sign( A ) ); \endcode // results in the matrix \f$ B = \left(\begin{array}{*{3}{c}} -1 & 1 & 0 \\ 1 & 0 & -1 \\ \end{array}\right)\f$ // \n \subsection matrix_operators_rounding_functions floor() / ceil() / trunc() / round() // // The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down/up // each element of a matrix, respectively: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = floor( A ); // Rounding down each element of the matrix B = ceil ( A ); // Rounding up each element of the matrix B = trunc( A ); // Truncating each element of the matrix B = round( A ); // Rounding each element of the matrix \endcode // \n \subsection matrix_operators_conj conj() // // The \c conj() function can be applied on a dense or sparse matrix to compute the complex // conjugate of each element of the matrix: \code using blaze::StaticMatrix; using cplx = std::complex<double>; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 
1.0 ), cplx( 0.0, 1.0 ) } }; // Computing the matrix of conjugate values // ( (1, 0) (-2, 1) ) // ( (1,-1) ( 0,-1) ) StaticMatrix<cplx,2UL,2UL> B; B = conj( A ); \endcode // Additionally, matrices can be conjugated in-place via the \c conjugate() function: \code blaze::DynamicMatrix<cplx> C( 5UL, 2UL ); conjugate( C ); // In-place conjugate operation. C = conj( C ); // Same as above \endcode // \n \subsection matrix_operators_real real() // // The \c real() function can be used on a dense or sparse matrix to extract the real part of // each element of the matrix: \code using blaze::StaticMatrix; using cplx = std::complex<double>; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Extracting the real part of each matrix element // ( 1 -2 ) // ( 1 0 ) StaticMatrix<double,2UL,2UL> B; B = real( A ); \endcode // \n \subsection matrix_operators_imag imag() // // The \c imag() function can be used on a dense or sparse matrix to extract the imaginary part // of each element of the matrix: \code using blaze::StaticMatrix; using cplx = std::complex<double>; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Extracting the imaginary part of each matrix element // ( 0 -1 ) // ( 1 1 ) StaticMatrix<double,2UL,2UL> B; B = imag( A ); \endcode // \n \subsection matrix_operators_arg arg() // // The \c arg() function can be used on a dense or sparse matrix to compute the phase angle for // each element of the matrix: \code using blaze::StaticMatrix; using cplx = std::complex<double>; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Computing the phase angle of each matrix element // ( 0.0 
-2.67795 ) // ( 0.785398 1.5708 ) StaticMatrix<double,2UL,2UL> B; B = arg( A ); \endcode // \n \subsection matrix_operators_sqrt sqrt() / invsqrt() // // Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a // matrix can be computed: \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; B = sqrt( A ); // Computes the square root of each element C = invsqrt( A ); // Computes the inverse square root of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_cbrt cbrt() / invcbrt() // // The \c cbrt() and \c invcbrt() functions can be used to compute the the (inverse) cubic root // of each element of a matrix: \code blaze::DynamicMatrix<double> A, B, C; B = cbrt( A ); // Computes the cubic root of each element C = invcbrt( A ); // Computes the inverse cubic root of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operations_hypot hypot() // // The \c hypot() function can be used to compute the componentwise hypotenous for a pair of // dense matrices: \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; C = hypot( A, B ); // Computes the componentwise hypotenuous \endcode // \n \subsection matrix_operators_clamp clamp() // // The \c clamp() function can be used to restrict all elements of a matrix to a specific range: \code blaze::DynamicMatrix<double> A, B; B = clamp( A, -1.0, 1.0 ); // Restrict all elements to the range [-1..1] \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_pow pow() // // The \c pow() function can be used to compute the exponential value of each element of a matrix. // If passed a matrix and a numeric exponent, the function computes the exponential value of each // element of the matrix using the same exponent. 
If passed a second matrix, the function computes // the componentwise exponential value: \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; C = pow( A, 1.2 ); // Computes the exponential value of each element C = pow( A, B ); // Computes the componentwise exponential value \endcode // \n \subsection matrix_operators_exp exp() / exp2() / exp10() // // \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a // matrix, respectively: \code blaze::HybridMatrix<double,3UL,3UL> A, B; B = exp( A ); // Computes the base e exponential of each element B = exp2( A ); // Computes the base 2 exponential of each element B = exp10( A ); // Computes the base 10 exponential of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_log log() / log2() / log10() / log1p() / lgamma() // // The \c log(), \c log2(), \c log10(), \c log1p() and \c lgamma() functions can be used to // compute the natural, binary and common logarithm of each element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = log( A ); // Computes the natural logarithm of each element B = log2( A ); // Computes the binary logarithm of each element B = log10( A ); // Computes the common logarithm of each element B = log1p( A ); // Computes the natural logarithm of x+1 of each element B = lgamma( A ); // Computes the natural logarithm of the absolute value of the gamma function \endcode // \n \subsection matrix_operators_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan() // // The following trigonometric functions are available for both dense and sparse matrices: \code blaze::DynamicMatrix<double> A, B; B = sin( A ); // Computes the sine of each element of the matrix B = cos( A ); // Computes the cosine of each element of the matrix B = tan( A ); // Computes the tangent of each element of the matrix B = asin( A ); // Computes the inverse sine of each 
element of the matrix B = acos( A ); // Computes the inverse cosine of each element of the matrix B = atan( A ); // Computes the inverse tangent of each element of the matrix \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh() // // The following hyperbolic functions are available for both dense and sparse matrices: \code blaze::DynamicMatrix<double> A, B; B = sinh( A ); // Computes the hyperbolic sine of each element of the matrix B = cosh( A ); // Computes the hyperbolic cosine of each element of the matrix B = tanh( A ); // Computes the hyperbolic tangent of each element of the matrix B = asinh( A ); // Computes the inverse hyperbolic sine of each element of the matrix B = acosh( A ); // Computes the inverse hyperbolic cosine of each element of the matrix B = atanh( A ); // Computes the inverse hyperbolic tangent of each element of the matrix \endcode // \n \subsection matrix_operations_atan2 atan2() // // The multi-valued inverse tangent is available for a pair of dense matrices: \code blaze::DynamicMatrix<double> A, B, C; C = atan2( A, B ); // Computes the componentwise multi-valued inverse tangent \endcode // \n \subsection matrix_operators_erf erf() / erfc() // // The \c erf() and \c erfc() functions compute the (complementary) error function of each // element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = erf( A ); // Computes the error function of each element B = erfc( A ); // Computes the complementary error function of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operations_map map() / forEach() // // Via the \c map() functions it is possible to execute componentwise custom operations on matrices. 
// The unary \c map() function can be used to apply a custom operation on each element of a // dense or sparse matrix. For instance, the following example demonstrates a custom square root // computation via a lambda: \code blaze::DynamicMatrix<double> A, B; B = map( A, []( double d ) { return std::sqrt( d ); } ); \endcode // The N-ary \c map() functions can be used to apply an operation componentwise to the elements // of N dense matrices (where \f$ N <= 6 \f$). The following example demonstrates the merging of // two matrices of double precision values into a matrix of double precision complex numbers: \code blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } }; blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } }; blaze::DynamicMatrix< complex<double> > cplx; // Creating the matrix // ( ( 2.1, 0.3) (-4.2, 1.4) ) // ( ( 1.0, 2.9) ( 0.6, -3.4) ) cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } ); \endcode // Although the computation can be parallelized it is not vectorized and thus cannot perform at // peak performance. However, it is also possible to create vectorized custom operations. See // \ref custom_operations for a detailed overview of the possibilities of custom operations. // // Please note that unary custom operations on vectors have been introduced in \b Blaze 3.0 in // form of the \c forEach() function. With the introduction of binary custom functions, the // \c forEach() function has been renamed to \c map(). The \c forEach() function can still be // used, but the function might be deprecated in future releases of \b Blaze. // // // \n \subsection matrix_operations_select select() // // The \c select() function performs a componentwise, conditional selection of elements. 
Given // the three dense matrices \c cond, \c A, and \c B, in case an element in the \c cond vector // evaluates to \c true, the according element of \a A is selected, in case the \a cond element // evaluates to \c false, the according element of \a B is selected. The following example // demonstrates the use of the \a select() function: \code blaze::DynamicMatrix<bool> cond{ { true, false }, { true false } }; blaze::DynamicMatrix<int> A{ { 1, -1 }, { 1, -1 } }; blaze::DynamicMatrix<int> B{ { -2, 2 }, { -2, 2 } }; blaze::DynamicMatrix<int> C; // ... Resizing and initialization C = select( cond, A, B ); // Results in ( 1, 2 ) ( 1, 2 ) \endcode // \n \section matrix_operations_reduction_operations Reduction Operations // <hr> // // \subsection matrix_operations_reduction_operations_reduce reduce() // // The \c reduce() function performs either a total reduction, a rowwise reduction or a columnwise // reduction of the elements of the given dense matrix or the non-zero elements of the given sparse // matrix. The following examples demonstrate the total reduction of a dense and sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double totalsum1 = reduce( A, blaze::Add() ); const double totalsum2 = reduce( A, []( double a, double b ){ return a + b; } ); \endcode \code blaze::CompressedMatrix<double> A; // ... Resizing and initialization const double totalsum1 = reduce( A, blaze::Add() ); const double totalsum2 = reduce( A, []( double a, double b ){ return a + b; } ); \endcode // By specifying \c blaze::columnwise or \c blaze::rowwise the \c reduce() function performs a // column-wise or row-wise reduction, respectively. In case \c blaze::columnwise is specified, the // (non-zero) elements of the matrix are reduced column-wise and the result is a row vector. 
In // case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are reduced row-wise // and the result is a column vector: \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; blaze::DynamicVector<double,rowVector> colsum1, colsum2; // ... Resizing and initialization colsum1 = reduce<columnwise>( A, blaze::Add() ); colsum2 = reduce<columnwise>( B, []( double a, double b ){ return a + b; } ); \endcode \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; blaze::DynamicVector<double,columnVector> rowsum1, rowsum2; // ... Resizing and initialization rowsum1 = reduce<rowwise>( A, blaze::Add() ); rowsum2 = reduce<rowwise>( B, []( double a, double b ){ return a + b; } ); \endcode // As demonstrated in the examples it is possible to pass any binary callable as custom reduction // operation. However, for instance in the case of lambdas the vectorization of the reduction // operation is compiler dependent and might not perform at peak performance. However, it is also // possible to create vectorized custom operations. See \ref custom_operations for a detailed // overview of the possibilities of custom operations. // // Please note that the evaluation order of the \c reduce() function is unspecified. Thus the // behavior is non-deterministic if the given reduction operation is not associative or not // commutative. Also, the operation is undefined if the given reduction operation modifies the // values. 
// // \n \subsection matrix_operations_reduction_operations_sum sum() // // The \c sum() function reduces the elements of the given dense vector or the non-zero elements // of the given sparse vector by means of addition: \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalsum = sum( A ); // Results in 10 \endcode \code blaze::CompressedMatrix<int> a{ { 1, 2 }, { 3, 4 } }; const int totalsum = sum( A ); // Results in 10 \endcode // By specifying \c blaze::columnwise or \c blaze::rowwise the \c sum() function performs a // column-wise or row-wise summation, respectively. In case \c blaze::columnwise is specified, // the (non-zero) elements of the matrix are summed up column-wise and the result is a row vector. // In case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are summed up // row-wise and the result is a column vector: \code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,rowVector> colsum1, colsum2; colsum1 = sum<columnwise>( A ); // Results in ( 2, 3, 6 ) colsum2 = sum<columnwise>( B ); // Same result \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,columnVector> rowsum1, rowsum2; rowsum1 = sum<rowwise>( A ); // Results in ( 3, 8 ) rowsum2 = sum<rowwise>( B ); // Same result \endcode // Please note that the evaluation order of the \c sum() function is unspecified. 
// // \n \subsection matrix_operations_reduction_operations_prod prod() // // The \c prod() function reduces the elements of the given dense vector or the non-zero elements // of the given sparse vector by means of multiplication: \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalprod = prod( A ); // Results in 24 \endcode \code blaze::CompressedMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalprod = prod( A ); // Results in 24 \endcode // By specifying \c blaze::columnwise or \c blaze::rowwise the \c prod() function performs a // column-wise or row-wise multiplication, respectively. In case \c blaze::columnwise is specified, // the (non-zero) elements of the matrix are multiplied column-wise and the result is a row vector. // In case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are multiplied // row-wise and the result is a column vector: \code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,rowVector> colprod1, colprod2; colprod1 = prod<columnwise>( A ); // Results in ( 1, 0, 8 ) colprod2 = prod<columnwise>( A ); // Results in ( 1, 3, 8 ) \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,columnVector> rowprod1, rowprod2; rowprod1 = prod<rowwise>( A ); // Results in ( 0, 12 ) rowprod2 = prod<rowwise>( A ); // Results in ( 2, 12 ) \endcode // Please note that the evaluation order of the \c prod() function is unspecified. // // \n \subsection matrix_operations_reduction_operations_min min() // // The unary \c min() function returns the smallest element of the given dense matrix or the // smallest non-zero element of the given sparse matrix. This function can only be used for // element types that support the smaller-than relationship. 
In case the given matrix currently // has either 0 rows or 0 columns, the returned value is the default value (e.g. 0 in case of // fundamental data types). \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalmin = min( A ); // Results in 1 \endcode \code blaze::CompressedMatrix<int> A{ { 1, 0 }, { 3, 0 } }; const int totalmin = min( A ); // Results in 1 \endcode // \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT // taken into account. In the previous example the compressed matrix has only 2 non-zero elements. // However, the minimum of this matrix is 1. // // By specifying \c blaze::columnwise or \c blaze::rowwise the \c min() function determines the // smallest (non-zero) element in each row or column, respectively. In case \c blaze::columnwise // is specified, the smallest (non-zero) element of each column is determined and the result is // a row vector. In case \c blaze::rowwise is specified, the smallest (non-zero) element of each // row is determined and the result is a column vector. \code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,rowVector> colmin1, colmin2; colmin1 = min<columnwise>( A ); // Results in ( 1, 0, 2 ) colmin2 = min<columnwise>( B ); // Results in ( 1, 3, 2 ) \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,columnVector> rowmin1, rowmin2; rowmin1 = min<rowwise>( A ); // Results in ( 0, 1 ) rowmin2 = min<rowwise>( B ); // Results in ( 1, 1 ) \endcode // \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT // taken into account. 
// // \n \subsection matrix_operations_reduction_operations_max max() // // The unary \c max() function returns the largest element of the given dense matrix or the // largest non-zero element of the given sparse matrix. This function can only be used for // element types that support the smaller-than relationship. In case the given matrix currently // has either 0 rows or 0 columns, the returned value is the default value (e.g. 0 in case of // fundamental data types). \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalmax = max( A ); // Results in 4 \endcode \code blaze::CompressedMatrix<int> A{ { -1, 0 }, { -3, 0 } }; const int totalmax = max( A ); // Results in -1 \endcode // \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT // taken into account. In the previous example the compressed matrix has only 2 non-zero elements. // However, the maximum of this matrix is -1. // // By specifying \c blaze::columnwise or \c blaze::rowwise the \c max() function determines the // largest (non-zero) element in each row or column, respectively. In case \c blaze::columnwise // is specified, the largest (non-zero) element of each column is determined and the result is // a row vector. In case \c blaze::rowwise is specified, the largest (non-zero) element of each // row is determined and the result is a column vector. 
\code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { -1, 0, -2 }, { -1, -3, -4 } }; blaze::DynamicVector<int,rowVector> colmax1, colmax2; colmax1 = max<columnwise>( A ); // Results in ( 1, 3, 4 ) colmax2 = max<columnwise>( B ); // Results in ( -1, -3, -2 ) \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { -1, 0, -2 }, { -1, -3, -4 } }; blaze::DynamicVector<int,columnVector> rowmax1, rowmax2; rowmax1 = max<rowwise>( A ); // Results in ( 2, 4 ) rowmax2 = max<rowwise>( B ); // Results in ( -1, -1 ) \endcode // \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT // taken into account. // // // \n \section matrix_operations_norms Norms // <hr> // // \subsection matrix_operations_norms_norm norm() // // The \c norm() function computes the L2 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; // ... Resizing and initialization const double norm1 = norm( A ); const double norm2 = norm( B ); \endcode // \n \subsection matrix_operations_norms_sqrnorm sqrNorm() // // The \c sqrNorm() function computes the squared L2 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; // ... Resizing and initialization const double norm1 = sqrNorm( A ); const double norm2 = sqrNorm( B ); \endcode // \n \subsection matrix_operations_norms_l1norm l1Norm() // // The \c l1Norm() function computes the squared L1 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; // ... 
Resizing and initialization const double norm1 = l1Norm( A ); const double norm2 = l1Norm( B ); \endcode // \n \subsection matrix_operations_norms_l2norm l2Norm() // // The \c l2Norm() function computes the squared L2 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; // ... Resizing and initialization const double norm1 = l2Norm( A ); const double norm2 = l2Norm( B ); \endcode // \n \subsection matrix_operations_norms_l3norm l3Norm() // // The \c l3Norm() function computes the squared L3 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; // ... Resizing and initialization const double norm1 = l3Norm( A ); const double norm2 = l3Norm( B ); \endcode // \n \subsection matrix_operations_norms_l4norm l4Norm() // // The \c l4Norm() function computes the squared L4 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; // ... Resizing and initialization const double norm1 = l4Norm( A ); const double norm2 = l4Norm( B ); \endcode // \n \subsection matrix_operations_norms_lpnorm lpNorm() // // The \c lpNorm() function computes the general Lp norm of the given dense or sparse matrix, // where the norm is specified by either a compile time or a runtime argument: \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; // ... Resizing and initialization const double norm1 = lpNorm<2>( A ); // Compile time argument const double norm2 = lpNorm( B, 2.3 ); // Runtime argument \endcode // \n \subsection matrix_operations_norms_maxnorm linfNorm() / maxNorm() // // The \c linfNorm() and \c maxNorm() functions compute the infinity/maximum norm of the given // dense or sparse matrix: \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; // ... 
Resizing and initialization const double norm1 = linfNorm( A ); const double norm2 = maxNorm( B ); \endcode // \n \section matrix_operations_scalar_expansion Scalar Expansion // <hr> // // By means of the \c uniform() function it is possible to expand a scalar value into a dense, // uniform matrix. By default, the resulting uniform matrix is a row-major matrix, but it is // possible to specify the storage order explicitly: \code using blaze::rowMajor; int scalar = 5; blaze::DynamicMatrix<int,rowMajor> A; // ... Resizing and initialization // Expansion of 'scalar' to a 3x5 row-major matrix // // ( 5 5 5 5 5 ) // ( 5 5 5 5 5 ) // ( 5 5 5 5 5 ) // A = uniform( 3UL, 5UL, scalar ); A = uniform<columnMajor>( 3UL, 5UL, scalar ); \endcode // \n \section matrix_operations_matrix_repetition Matrix Repetition // <hr> // // Via the \c repeat() function it is possible to repeat a dense or sparse matrix multiple times // to represent a larger matrix. Repeating a row-major matrix results in a row-major matrix, // repeating a column-major matrix results in a column-major matrix. As demonstrated by the // following examples, \c repeat() can be used with both runtime and compile time parameters: \code using blaze::rowMajor; using blaze::columnMajor; blaze::DynamicMatrix<int,rowMajor> A1{ { 1, 0, -2 }, { 0, 5, 0 } }; blaze::CompressedMatrix<int,columnMajor> B1{ { 0, -1 }, { 0, 4 }, { 7, 0 } }; blaze::DynamicMatrix<int,rowMajor> A2; blaze::CompressedMatrix<int,columnMajor> B2; // ... 
Resizing and initialization // Repeating the 2x3 dense row-major matrix 'A1' 2x rowwise and 3x columnwise results in // // ( 1 0 -2 1 0 -2 1 0 -2 ) // ( 0 5 0 0 5 0 0 5 0 ) // ( 1 0 -2 1 0 -2 1 0 -2 ) // ( 0 5 0 0 5 0 0 5 0 ) // A2 = repeat( A1, 2UL, 3UL ); A2 = repeat<2UL,3UL>( A1 ); // Repeating the 3x2 sparse column-major matrix 'B1' 2x rowwise and 3x columnwise results in // // ( 0 -1 0 -1 0 -1 ) // ( 0 4 0 4 0 4 ) // ( 7 0 7 0 7 0 ) // ( 0 -1 0 -1 0 -1 ) // ( 0 4 0 4 0 4 ) // ( 7 0 7 0 7 0 ) // B2 = repeat( B1, 2UL, 3UL ); B2 = repeat<2UL,3UL>( B1 ); \endcode // \n \section matrix_operations_statistic_operations Statistic Operations // <hr> // // \subsection matrix_operations_mean mean() // // The <a href="https://en.wikipedia.org/wiki/Arithmetic_mean">(arithmetic) mean</a> of a dense or // sparse matrix can be computed via the \c mean() function. In case of a sparse matrix, both the // non-zero and zero elements are taken into account. The following example demonstrates the // computation of the mean of a dense matrix: \code blaze::DynamicMatrix<int> A{ { 1, 4, 3, 6, 7 } , { 2, 6, 3, 1, 0 } }; const double m = mean( A ); // Results in 3.3 (i.e. 33/10) \endcode // In case the number of rows or columns of the given matrix is 0, a \c std::invalid_argument is // thrown. // // Alternatively it is possible to compute the row- or columnwise mean: \code using blaze::columnVector; using blaze::rowVector; blaze::DynamicMatrix<int> A{ { 1, 4, 3, 6, 7 } , { 2, 6, 3, 1, 0 } }; blaze::DynamicVector<double,columnVector> rm; blaze::DynamicVector<double,rowVector> cm; rm = mean<rowwise>( A ); // Results in ( 4.2 2.4 ) cm = mean<columnwise>( A ); // Results in ( 1.5 5.0 3.0 3.5 3.5 ) \endcode // In case the rowwise mean is computed and the number of columns of the given matrix is 0 or // in case the columnwise mean is computed and the number of rows of the given matrix is 0, a // \c std::invalid_argument is thrown. 
// // \n \subsection matrix_operations_var var() // // The <a href="https://en.wikipedia.org/wiki/Variance">variance</a> of a dense or sparse matrix // can be computed via the \c var() function. In case of a sparse vector, both the non-zero and // zero elements are taken into account. The following example demonstrates the computation of // the variance of a dense matrix: \code blaze::DynamicMatrix<int> A{ { 1, 3, 2 } , { 2, 6, 4 } , { 9, 6, 3 } }; const double v = var( A ); // Results in 6.5 \endcode // In case the size of the given matrix is smaller than 2, a \c std::invalid_argument is thrown. // // Alternatively it is possible to compute the row- or columnwise variance: \code using blaze::columnVector; using blaze::rowVector; blaze::DynamicMatrix<int> A{ { 1, 3, 2 } , { 2, 6, 4 } , { 9, 6, 3 } }; blaze::DynamicVector<double,columnVector> rv; blaze::DynamicVector<double,rowVector> cv; rv = var<rowwise>( A ); // Results in ( 1 4 9 ) cv = var<columnwise>( A ); // Results in ( 19 3 1 ) \endcode // In case the rowwise varoamce is computed and the number of columns of the given matrix is // smaller than 2 or in case the columnwise mean is computed and the number of rows of the given // matrix is smaller than 2, a \c std::invalid_argument is thrown. // // \n \subsection matrix_operations_stddev stddev() // // The <a href="https://en.wikipedia.org/wiki/Standard_deviation">standard deviation</a> of a // dense or sparse matrix can be computed via the \c stddev() function. In case of a sparse // vector, both the non-zero and zero elements are taken into account. The following example // demonstrates the computation of the standard deviation of a dense matrix: \code blaze::DynamicMatrix<int> A{ { 1, 3, 2 } , { 2, 6, 4 } , { 9, 6, 3 } }; const double s = stddev( A ); // Results in sqrt(6.5) \endcode // In case the size of the given matrix is smaller than 2, a \c std::invalid_argument is thrown. 
//
// Alternatively it is possible to compute the row- or columnwise standard deviation:

   \code
   using blaze::columnVector;
   using blaze::rowVector;

   blaze::DynamicMatrix<int> A{ { 1, 3, 2 }
                              , { 2, 6, 4 }
                              , { 9, 6, 3 } };

   blaze::DynamicVector<double,columnVector> rs;
   blaze::DynamicVector<double,rowVector> cs;

   rs = stddev<rowwise>( A );     // Results in ( 1 2 3 )
   cs = stddev<columnwise>( A );  // Results in ( sqrt(19) sqrt(3) 1 )
   \endcode

// In case the rowwise standard deviation is computed and the number of columns of the given
// matrix is smaller than 2 or in case the columnwise standard deviation is computed and the
// number of rows of the given matrix is smaller than 2, a \c std::invalid_argument is thrown.
//
//
// \n \section matrix_operations_declaration_operations Declaration Operations
// <hr>
//
// \subsection matrix_operations_declsym declsym()
//
// The \c declsym() operation can be used to explicitly declare any matrix or matrix expression
// as symmetric:

   \code
   blaze::DynamicMatrix<double> A, B;
   // ... Resizing and initialization

   B = declsym( A );
   \endcode

// Any matrix or matrix expression that has been declared as symmetric via \c declsym() will
// gain all the benefits of a symmetric matrix, which range from reduced runtime checking to
// a considerable speed-up in computations:

   \code
   using blaze::DynamicMatrix;
   using blaze::SymmetricMatrix;

   DynamicMatrix<double> A, B, C;
   SymmetricMatrix< DynamicMatrix<double> > S;
   // ... Resizing and initialization

   isSymmetric( declsym( A ) );  // Will always return true without runtime effort

   S = declsym( A );  // Omit any runtime check for symmetry

   C = declsym( A * B );  // Declare the result of the matrix multiplication as symmetric,
                          // i.e. perform an optimized matrix multiplication
   \endcode

// \warning The \c declsym() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information.
Declaring a non-symmetric matrix or // matrix expression as symmetric via the \c declsym() operation leads to undefined behavior // (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declherm declherm() // // The \c declherm() operation can be used to explicitly declare any matrix or matrix expression // as Hermitian: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declherm( A ); \endcode // Any matrix or matrix expression that has been declared as Hermitian via \c declherm() will // gain all the benefits of an Hermitian matrix, which range from reduced runtime checking to // a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; DynamicMatrix<double> A, B, C; HermitianMatrix< DynamicMatrix<double> > S; // ... Resizing and initialization isHermitian( declherm( A ) ); // Will always return true without runtime effort S = declherm( A ); // Omit any runtime check for Hermitian symmetry C = declherm( A * B ); // Declare the result of the matrix multiplication as Hermitian, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c declherm() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-Hermitian matrix or // matrix expression as Hermitian via the \c declherm() operation leads to undefined behavior // (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_decllow decllow() // // The \c decllow() operation can be used to explicitly declare any matrix or matrix expression // as lower triangular: \code blaze::DynamicMatrix<double> A, B; // ... 
Resizing and initialization B = decllow( A ); \endcode // Any matrix or matrix expression that has been declared as lower triangular via \c decllow() // will gain all the benefits of a lower triangular matrix, which range from reduced runtime // checking to a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; DynamicMatrix<double> A, B, C; LowerMatrix< DynamicMatrix<double> > L; // ... Resizing and initialization isLower( decllow( A ) ); // Will always return true without runtime effort L = decllow( A ); // Omit any runtime check for A being a lower matrix C = decllow( A * B ); // Declare the result of the matrix multiplication as lower triangular, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c decllow() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-lower matrix or // matrix expression as lower triangular via the \c decllow() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declunilow declunilow() // // The \c declunilow() operation can be used to explicitly declare any matrix or matrix expression // as lower unitriangular: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declunilow( A ); \endcode // Any matrix or matrix expression that has been declared as lower unitriangular via \c declunilow() // will gain all the benefits of a lower unitriangular matrix, which range from reduced runtime // checking to a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::UniLowerMatrix; DynamicMatrix<double> A, B, C; UniLowerMatrix< DynamicMatrix<double> > L; // ... 
Resizing and initialization isUniLower( declunilow( A ) ); // Will always return true without runtime effort L = declunilow( A ); // Omit any runtime check for A being an unilower matrix C = declunilow( A * B ); // Declare the result of the matrix multiplication as lower // unitriangular, i.e. perform an optimized matrix multiplication \endcode // \warning The \c declunilow() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-unilower matrix or // matrix expression as lower unitriangular via the \c declunilow() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declstrlow declstrlow() // // The \c declstrlow() operation can be used to explicitly declare any matrix or matrix expression // as strictly lower triangular: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declstrlow( A ); \endcode // Any matrix or matrix expression that has been declared as strictly lower triangular via // \c declstrlow() will gain all the benefits of a strictly lower triangular matrix, which range // from reduced runtime checking to a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::StrictlyLowerMatrix; DynamicMatrix<double> A, B, C; StrictlyLowerMatrix< DynamicMatrix<double> > L; // ... Resizing and initialization isStrictlyLower( declstrlow( A ) ); // Will always return true without runtime effort L = declstrlow( A ); // Omit any runtime check for A being a strictly lower matrix C = declstrlow( A * B ); // Declare the result of the matrix multiplication as strictly lower // triangular, i.e. perform an optimized matrix multiplication \endcode // \warning The \c declstrlow() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. 
Declaring a non-strictly-lower matrix // or matrix expression as strictly lower triangular via the \c declstrlow() operation leads to // undefined behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declupp declupp() // // The \c declupp() operation can be used to explicitly declare any matrix or matrix expression // as upper triangular: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declupp( A ); \endcode // Any matrix or matrix expression that has been declared as upper triangular via \c declupp() // will gain all the benefits of an upper triangular matrix, which range from reduced runtime // checking to a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::UpperMatrix; DynamicMatrix<double> A, B, C; UpperMatrix< DynamicMatrix<double> > U; // ... Resizing and initialization isUpper( declupp( A ) ); // Will always return true without runtime effort U = declupp( A ); // Omit any runtime check for A being an upper matrix C = declupp( A * B ); // Declare the result of the matrix multiplication as upper triangular, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c declupp() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-upper matrix or // matrix expression as upper triangular via the \c declupp() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_decluniupp decluniupp() // // The \c decluniupp() operation can be used to explicitly declare any matrix or matrix expression // as upper unitriangular: \code blaze::DynamicMatrix<double> A, B; // ... 
Resizing and initialization

   B = decluniupp( A );
   \endcode

// Any matrix or matrix expression that has been declared as upper unitriangular via \c decluniupp()
// will gain all the benefits of an upper unitriangular matrix, which range from reduced runtime
// checking to a considerable speed-up in computations:

   \code
   using blaze::DynamicMatrix;
   using blaze::UniUpperMatrix;

   DynamicMatrix<double> A, B, C;
   UniUpperMatrix< DynamicMatrix<double> > L;
   // ... Resizing and initialization

   isUniUpper( decluniupp( A ) );  // Will always return true without runtime effort

   L = decluniupp( A );  // Omit any runtime check for A being an uniupper matrix

   C = decluniupp( A * B );  // Declare the result of the matrix multiplication as upper
                             // unitriangular, i.e. perform an optimized matrix multiplication
   \endcode

// \warning The \c decluniupp() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-uniupper matrix or
// matrix expression as upper unitriangular via the \c decluniupp() operation leads to undefined
// behavior (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_declstrupp declstrupp()
//
// The \c declstrupp() operation can be used to explicitly declare any matrix or matrix expression
// as strictly upper triangular:

   \code
   blaze::DynamicMatrix<double> A, B;
   // ... Resizing and initialization

   B = declstrupp( A );
   \endcode

// Any matrix or matrix expression that has been declared as strictly upper triangular via
// \c declstrupp() will gain all the benefits of a strictly upper triangular matrix, which range
// from reduced runtime checking to a considerable speed-up in computations:

   \code
   using blaze::DynamicMatrix;
   using blaze::StrictlyUpperMatrix;

   DynamicMatrix<double> A, B, C;
   StrictlyUpperMatrix< DynamicMatrix<double> > L;
   // ...
Resizing and initialization isStrictlyUpper( declstrupp( A ) ); // Will always return true without runtime effort L = declstrupp( A ); // Omit any runtime check for A being a strictly upper matrix C = declstrupp( A * B ); // Declare the result of the matrix multiplication as strictly upper // triangular, i.e. perform an optimized matrix multiplication \endcode // \warning The \c declstrupp() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-strictly-upper matrix // or matrix expression as strictly upper triangular via the \c declstrupp() operation leads to // undefined behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_decldiag decldiag() // // The \c decldiag() operation can be used to explicitly declare any matrix or matrix expression // as diagonal: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = decldiag( A ); \endcode // Any matrix or matrix expression that has been declared as diagonal via \c decldiag() will // gain all the benefits of a diagonal matrix, which range from reduced runtime checking to // a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::DiagonalMatrix; DynamicMatrix<double> A, B, C; DiagonalMatrix< DynamicMatrix<double> > D; // ... Resizing and initialization isDiagonal( decldiag( A ) ); // Will always return true without runtime effort D = decldiag( A ); // Omit any runtime check for A being a diagonal matrix C = decldiag( A * B ); // Declare the result of the matrix multiplication as diagonal, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c decldiag() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. 
Declaring a non-diagonal matrix // or matrix expression as diagonal via the \c decldiag() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declid declid() // // The \c declid() operation can be used to explicitly declare any matrix or matrix expression // as identity matrix: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declid( A ); \endcode // Any matrix or matrix expression that has been declared as identity matrix via \c declid() will // gain all the benefits of an identity matrix, which range from reduced runtime checking to a // considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::DiagonalMatrix; DynamicMatrix<double> A, B, C; DiagonalMatrix< DynamicMatrix<double> > D; // ... Resizing and initialization isIdentity( declid( A ) ); // Will always return true without runtime effort D = declid( A ); // Omit any runtime check for A being a diagonal matrix C = declid( A ) * B; // Declare the left operand of the matrix multiplication as an // identity matrix, i.e. perform an optimized matrix multiplication \endcode // \warning The \c declid() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-identity matrix // or matrix expression as identity matrix via the \c declid() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declzero declzero() // // The \c declzero() operation can be used to explicitly declare any matrix or matrix expression // as zero matrix: \code blaze::DynamicMatrix<double> A, B; // ... 
Resizing and initialization B = declzero( A ); \endcode // Any matrix or matrix expression that has been declared as zero matrix via \c declzero() will // gain all the benefits of a zero matrix, which range from reduced runtime checking to a // considerable speed-up in computations: \code using blaze::DynamicMatrix; DynamicMatrix<double> A, B, C; // ... Resizing and initialization isZero( declzero( A ) ); // Will always return true without runtime effort C = declzero( A ) + B; // Declare the left operand of the matrix addition as a // zero matrix, i.e. no addition needs to be performed \endcode // \warning The \c declzero() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-zero matrix or // matrix expression as zero matrix via the \c declzero() operation leads to undefined behavior // (which can be violated invariants or wrong computation results)! // // // \n \section matrix_operations_matrix_generators Matrix Generators // <hr> // // \subsection matrix_operations_generate generate() // // The \c generate() function returns a dense matrix filled elementwise via the given custom // binary operation. By default, the returned matrix is a row-major matrix, but this setting can // be changed via the \c BLAZE_DEFAULT_STORAGE_ORDER switch (see \ref storage_order). 
Alternatively
// it is possible to specify the storage order explicitly.\n
// The following example demonstrates the use of the \c generate() function:

   \code
   using blaze::generate;
   using blaze::rowMajor;
   using blaze::columnMajor;

   // Generates the uniform integer matrix ( ( 2, 2, 2 ), ( 2, 2, 2 ) )
   blaze::DynamicMatrix<int,rowMajor> A;
   A = generate( 2UL, 3UL, []( size_t i, size_t j ){ return 2; } );

   // Generates the linearly spaced float matrix ( ( 2.1, 3.2, 4.3 ), ( 5.4, 6.5, 7.6 ) )
   blaze::DynamicMatrix<float,rowMajor> B;
   B = generate( 2UL, 3UL, []( size_t i, size_t j ){ return 2.1F + 1.1F*(i*3UL+j); } );

   // Generates the logarithmically spaced double matrix ( ( 10.0, 100.0 ), ( 1000.0, 10000.0 ) )
   blaze::DynamicMatrix<double,rowMajor> C;
   C = generate<rowMajor>( 2UL, 2UL, []( size_t i, size_t j ) { return blaze::exp10( 1.0 + 1.0*(i*2UL+j) ); } );

   // Generates the matrix of integer vectors ( ( 1, 2 ), ( 2, 3 ), ( 3, 4 ), ( 4, 5 ) )
   using VT = blaze::StaticVector<int,2UL>;
   blaze::DynamicMatrix<VT,columnMajor> D;
   D = generate<columnMajor>( 2UL, 2UL, []( size_t i, size_t j ) { return evaluate( VT{ 1, 2 } + (i*2UL+j) ); } );
   \endcode

// \n \subsection matrix_operations_uniform uniform()
//
// The \c uniform() function creates a uniform matrix of the given size. By default, the
// resulting uniform matrix is a row-major matrix, but this setting can be changed via the
// \c BLAZE_DEFAULT_STORAGE_ORDER switch (see \ref storage_order).
Alternatively it is // possible to specify the storage order explicitly.\n // The following example demonstrates the use of the \c uniform() function: \code using blaze::uniform; using blaze::rowMajor; using blaze::columnMajor; // Creates the uniform row-major matrix // ( 1, 1, 1, 1, 1 ) // ( 1, 1, 1, 1, 1 ) auto U1 = uniform( 2UL, 5UL, 1 ); // Creates the uniform row-major matrix // ( 1.2, 1.2 ) // ( 1.2, 1.2 ) // ( 1.2, 1.2 ) auto U2 = uniform<rowMajor>( 3UL, 2UL, 1.2 ); // Creates the uniform column-major matrix // ( 5U, 5U, 5U, 5U, 5U, 5U, 5U ) // ( 5U, 5U, 5U, 5U, 5U, 5U, 5U ) auto U3 = uniform<columnMajor>( 2UL, 7UL, 5U ); \endcode // \n \subsection matrix_operations_zero zero() // // The \c zero() function creates a zero matrix of the given element type and size. By default, // the resulting zero matrix is a row-major matrix, but this setting can be changed via the // \c BLAZE_DEFAULT_STORAGE_ORDER switch (see \ref storage_order). Alternatively it is possible // to specify the storage order explicitly.\n // The following example demonstrates the use of the \c zero() function: \code using blaze::zero; using blaze::rowMajor; using blaze::columnMajor; // Creates the row-major zero matrix // ( 0, 0, 0, 0, 0 ) // ( 0, 0, 0, 0, 0 ) auto Z1 = zero<int>( 2UL, 5UL ); // Creates the row-major zero matrix // ( 0.0, 0.0 ) // ( 0.0, 0.0 ) // ( 0.0, 0.0 ) auto Z2 = zero<double,rowMajor>( 3UL, 2UL ); // Creates the column-major zero matrix // ( 0U, 0U, 0U, 0U, 0U, 0U, 0U ) // ( 0U, 0U, 0U, 0U, 0U, 0U, 0U ) auto Z3 = zero<unsigned int,columnMajor>( 2UL, 7UL ); \endcode // \n \section matrix_operations_matrix_inversion Matrix Inversion // <hr> // // The inverse of a square dense matrix can be computed via the \c inv() function: \code blaze::DynamicMatrix<float,blaze::rowMajor> A, B; // ... 
Resizing and initialization B = inv( A ); // Compute the inverse of A \endcode // Alternatively, an in-place inversion of a dense matrix can be performed via the \c invert() // function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization invert( A ); // In-place matrix inversion \endcode // Both the \c inv() and the \c invert() functions will automatically select the most suited matrix // inversion algorithm depending on the size and type of the given matrix. For small matrices of // up to 6x6, both functions use manually optimized kernels for maximum performance. For matrices // larger than 6x6 the inversion is performed by means of the most suited matrix decomposition // method: In case of a general matrix the LU decomposition is used, for symmetric matrices the // LDLT decomposition is applied, for Hermitian matrices the LDLH decomposition is performed, and // for triangular matrices the inverse is computed via a forward or back substitution. // // In case the type of the matrix does not provide additional compile time information about its // structure (symmetric, lower, upper, diagonal, ...), the information can be provided manually // by means of \ref matrix_operations_declaration_operations when calling the \c invert() function: \code invert( declsym( A ) ); // In-place inversion of a symmetric matrix invert( declherm( A ) ); // In-place inversion of an Hermitian matrix invert( decllow( A ) ); // In-place inversion of a lower triangular matrix invert( declunilow( A ) ); // In-place inversion of a lower unitriangular matrix invert( declupp( A ) ); // In-place inversion of an upper triangular matrix invert( decluniupp( A ) ); // In-place inversion of an upper unitriangular matrix invert( decldiag( A ) ); // In-place inversion of a diagonal matrix \endcode // Alternatively, via the \c invert() function it is possible to explicitly specify the inversion // algorithm: \code using blaze::byLU; using blaze::byLDLT; using 
blaze::byLDLH; using blaze::byLLH; // In-place inversion of a general matrix by means of an LU decomposition invert<byLU>( A ); // In-place inversion of a symmetric indefinite matrix by means of a Bunch-Kaufman decomposition invert<byLDLT>( A ); // In-place inversion of an Hermitian indefinite matrix by means of a Bunch-Kaufman decomposition invert<byLDLH>( A ); // In-place inversion of a positive definite matrix by means of a Cholesky decomposition invert<byLLH>( A ); \endcode // Whereas the inversion by means of an LU decomposition works for every general square matrix, // the inversion by LDLT only works for symmetric indefinite matrices, the inversion by LDLH is // restricted to Hermitian indefinite matrices and the Cholesky decomposition (LLH) only works // for Hermitian positive definite matrices. Please note that it is in the responsibility of the // function caller to guarantee that the selected algorithm is suited for the given matrix. In // case this precondition is violated the result can be wrong and might not represent the inverse // of the given matrix! // // For both the \c inv() and \c invert() function the matrix inversion fails if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // In all failure cases either a compilation error is created if the failure can be predicted at // compile time or a \c std::invalid_argument exception is thrown. // // \note The matrix inversion can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions invert the dense matrix by means of LAPACK kernels. Thus the functions can // only be used if a fitting LAPACK library is available and linked to the executable. Otherwise // a linker error will be created. 
// // \note It is not possible to use any kind of view on the expression object returned by the // \c inv() function. Also, it is not possible to access individual elements via the function call // operator on the expression object: \code row( inv( A ), 2UL ); // Compilation error: Views cannot be used on an inv() expression! inv( A )(1,2); // Compilation error: It is not possible to access individual elements! \endcode // \note The inversion functions do not provide any exception safety guarantee, i.e. in case an // exception is thrown the matrix may already have been modified. // // // \n \section matrix_operations_matrix_exponential Matrix Exponential // <hr> // // The matrix exponential of a \f$N \times N\f$ matrix \f$ X \f$ is defined as \f[ e^X = \sum\limits_{k=0}^\infty \frac{1}{k!} X^k. \f] // In order to compute the matrix exponential of a square dense matrix, the \c matexp() function // can be used: \code blaze::DynamicMatrix<float,blaze::rowMajor> A, B; // ... Resizing and initialization B = matexp( A ); // Compute the exponential of A \endcode // \note The matrix exponential can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type results in a compile time error! // // \note It is not possible to use any kind of view on the expression object returned by the // \c matexp() function. Also, it is not possible to access individual elements via the function // call operator on the expression object: \code row( matexp( A ), 2UL ); // Compilation error: Views cannot be used on an matexp() expression! matexp( A )(1,2); // Compilation error: It is not possible to access individual elements! 
\endcode // \n \section matrix_operations_decomposition Matrix Decomposition // <hr> // // \note All decomposition functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions decompose a dense matrix by means of LAPACK kernels. Thus the functions can // only be used if a fitting LAPACK library is available and linked to the executable. Otherwise // a linker error will be created. // // \subsection matrix_operations_decomposition_lu LU Decomposition // // The LU decomposition of a dense matrix can be computed via the \c lu() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L, U, P; lu( A, L, U, P ); // LU decomposition of a row-major matrix assert( A == L * U * P ); \endcode \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::columnMajor> L, U, P; lu( A, L, U, P ); // LU decomposition of a column-major matrix assert( A == P * L * U ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices. Note, however, that the // three matrices \c A, \c L and \c U are required to have the same storage order. Also, please // note that the way the permutation matrix \c P needs to be applied differs between row-major and // column-major matrices, since the algorithm uses column interchanges for row-major matrices and // row interchanges for column-major matrices. // // Furthermore, \c lu() can be used with adaptors. For instance, the following example demonstrates // the LU decomposition of a symmetric matrix into a lower and upper triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... 
Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > U; blaze::DynamicMatrix<double,blaze::columnMajor> P; lu( A, L, U, P ); // LU decomposition of A \endcode // \n \subsection matrix_operations_decomposition_llh Cholesky Decomposition // // The Cholesky (LLH) decomposition of a dense matrix can be computed via the \c llh() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L; llh( A, L ); // LLH decomposition of a row-major matrix assert( A == L * ctrans( L ) ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the two matrices \c A // and \c L can have any storage order. // // Furthermore, \c llh() can be used with adaptors. For instance, the following example demonstrates // the LLH decomposition of a symmetric matrix into a lower triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; llh( A, L ); // Cholesky decomposition of A \endcode // \n \subsection matrix_operations_decomposition_qr QR Decomposition // // The QR decomposition of a dense matrix can be computed via the \c qr() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::columnMajor> Q; blaze::DynamicMatrix<double,blaze::rowMajor> R; qr( A, Q, R ); // QR decomposition of a row-major matrix assert( A == Q * R ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c Q and \c R can have any storage order. // // Furthermore, \c qr() can be used with adaptors. 
For instance, the following example demonstrates // the QR decomposition of a symmetric matrix into a general matrix and an upper triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > R; qr( A, Q, R ); // QR decomposition of A \endcode // \n \subsection matrix_operations_decomposition_rq RQ Decomposition // // Similar to the QR decomposition, the RQ decomposition of a dense matrix can be computed via // the \c rq() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> R; blaze::DynamicMatrix<double,blaze::columnMajor> Q; rq( A, R, Q ); // RQ decomposition of a row-major matrix assert( A == R * Q ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c R and \c Q can have any storage order. // // Also the \c rq() function can be used in combination with matrix adaptors. For instance, the // following example demonstrates the RQ decomposition of an Hermitian matrix into a general // matrix and an upper triangular matrix: \code blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... Resizing and initialization blaze::UpperMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > R; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; rq( A, R, Q ); // RQ decomposition of A \endcode // \n \subsection matrix_operations_decomposition_ql QL Decomposition // // The QL decomposition of a dense matrix can be computed via the \c ql() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::DynamicMatrix<double,blaze::columnMajor> L; ql( A, Q, L ); // QL decomposition of a row-major matrix assert( A == Q * L ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c Q and \c L can have any storage order. // // Also the \c ql() function can be used in combination with matrix adaptors. For instance, the // following example demonstrates the QL decomposition of a symmetric matrix into a general // matrix and a lower triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; ql( A, Q, L ); // QL decomposition of A \endcode // \n \subsection matrix_operations_decomposition_lq LQ Decomposition // // The LQ decomposition of a dense matrix can be computed via the \c lq() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L; blaze::DynamicMatrix<double,blaze::columnMajor> Q; lq( A, L, Q ); // LQ decomposition of a row-major matrix assert( A == L * Q ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c L and \c Q can have any storage order. // // Furthermore, \c lq() can be used with adaptors. For instance, the following example demonstrates // the LQ decomposition of an Hermitian matrix into a lower triangular matrix and a general matrix: \code blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... 
Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > L; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; lq( A, L, Q ); // LQ decomposition of A \endcode // \n \section matrix_operations_linear_systems Linear Systems // <hr> // // The \c solve() function computes a solution for the given dense linear system of equations (LSE) // \f$ A*x=b \f$, where \c A is the given system matrix, \c x is the solution vector, and \c b is // the given dense right-hand side vector: \code blaze::DynamicMatrix<double> A; // The square general system matrix blaze::DynamicVector<double> b; // The right-hand side vector // ... Resizing and initialization blaze::DynamicVector<double> x; // The solution vector solve( A, x, b ); // Computing the solution x x = solve( A, b ); // Alternative syntax \endcode // Alternatively, \c solve() computes a solution for the given dense LSE \f$ A*X=B \f$, where \c A // is the given dense system matrix, the columns of \c X are the solution vectors, and the columns // of \c B are the given right-hand side vectors: \code blaze::DynamicMatrix<double> A; // The square general system matrix blaze::DynamicMatrix<double> B; // The right-hand side matrix // ... Resizing and initialization blaze::DynamicMatrix<double> X; // The solution matrix solve( A, X, B ); // Computing the solutions X X = solve( A, B ); // Alternative syntax \endcode // Both \c solve() functions will automatically select the most suited direct solver algorithm // depending on the size and type of the given system matrix. For small matrices of up to 6x6, // both functions use manually optimized kernels for maximum performance. For matrices larger // than 6x6 the computation is performed by means of the most suited LAPACK solver method (see // \ref lapack_linear_system_solver). 
// // In case the type of the matrix does not provide additional compile time information about // its structure (symmetric, lower, upper, diagonal, ...), the information can be provided // manually by means of \ref matrix_operations_declaration_operations when calling the \c solve() // functions: \code blaze::DynamicMatrix<double> A; // The square lower system matrix blaze::DynamicVector<double> b; // The right-hand side vector // ... Resizing and initialization blaze::DynamicVector<double> x; // The solution vector solve( declsym( A ), x, b ); // Solving the LSE with a symmetric system matrix solve( declherm( A ), x, b ); // Solving the LSE with an Hermitian system matrix solve( decllow( A ), x, b ); // Solving the LSE with a lower system matrix solve( declunilow( A ), x, b ); // Solving the LSE with an unilower system matrix solve( declupp( A ), x, b ); // Solving the LSE with an upper system matrix solve( decluniupp( A ), x, b ); // Solving the LSE with an uniupper system matrix solve( decldiag( A ), x, b ); // Solving the LSE with a diagonal system matrix \endcode // For both \c solve() functions the computation fails if ... // // - ... the given matrix is not a square matrix; // - ... the size of the right-hand side vector doesn't match the dimensions of the system matrix; // - ... the number of rows of the right-hand side matrix doesn't match the dimensions of the system matrix; // - ... the given matrix is singular and not invertible. // // In all failure cases either a compilation error is created if the failure can be predicted at // compile time or a \c std::invalid_argument exception is thrown. // // \note The \c solve() functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions may make use of LAPACK kernels. 
Thus the functions can only be used if a // fitting LAPACK library is available and linked to the executable. Otherwise a linker error will // be created. // // \note It is not possible to use any kind of view on the expression object returned by the // two-argument \c solve() function. Also, it is not possible to access individual elements via // the function call operator on the expression object: \code row( solve( A, b ), 2UL ); // Compilation error: Views cannot be used on a solve() expression! solve( A, b )[2]; // Compilation error: It is not possible to access individual elements! rows( solve( A, B ), { 2UL, 4UL } ); // Compilation error: Views cannot be used on a solve() expression! solve( A, B )(1,2); // Compilation error: It is not possible to access individual elements! \endcode // \note The \c solve() functions do not provide any exception safety guarantee, i.e. in case an // exception is thrown the solution vector or matrix may already have been modified. // // // \n \section matrix_operations_eigenvalues Eigenvalues/Eigenvectors // <hr> // // The eigenvalues and eigenvectors of a dense matrix can be computed via the \c eigen() functions. // The following examples give an impression of the computation of eigenvalues and eigenvectors // for a general, a symmetric, and an Hermitian matrix: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix<double,rowMajor> A( 5UL, 5UL ); // The general matrix A // ...
Initialization DynamicVector<complex<double>,columnVector> w( 5UL ); // The vector for the complex eigenvalues DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors w = eigen( A ); // Computing only the eigenvalues of A (one argument) eigen( A, w ); // Computing only the eigenvalues of A (two arguments) eigen( A, w, V ); // Computing both the eigenvalues and eigenvectors of A (three arguments) \endcode \code using blaze::SymmetricMatrix; using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 5UL ); // The symmetric matrix A // ... Initialization DynamicVector<double,columnVector> w( 5UL ); // The vector for the real eigenvalues DynamicMatrix<double,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors w = eigen( A ); // Computing only the eigenvalues of A (one argument) eigen( A, w ); // Computing only the eigenvalues of A (two arguments) eigen( A, w, V ); // Computing both the eigenvalues and eigenvectors of A (three arguments) \endcode \code using blaze::HermitianMatrix; using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; HermitianMatrix< DynamicMatrix<complex<double>,rowMajor> > A( 5UL ); // The Hermitian matrix A // ... Initialization DynamicVector<double,columnVector> w( 5UL ); // The vector for the real eigenvalues DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors w = eigen( A ); // Computing only the eigenvalues of A (one argument) eigen( A, w ); // Computing only the eigenvalues of A (two arguments) eigen( A, w, V ); // Computing both the eigenvalues and eigenvectors of A (three arguments) \endcode // The one- and two-argument functions compute only the eigenvalues of the given \a n-by-\a n // matrix, the three-argument function additionally computes the eigenvectors. 
The eigenvalues // are returned in the given vector \a w and the eigenvectors are returned in the given matrix // \a V, which are both resized to the correct dimensions (if possible and necessary). // // Depending on the given matrix type, the resulting eigenvalues are either of floating point // or complex type: In case the given matrix is either a compile time symmetric matrix with // floating point elements or an Hermitian matrix with complex elements, the resulting eigenvalues // will be of floating point type and therefore the elements of the given eigenvalue vector are // expected to be of floating point type. In all other cases they are expected to be of complex // type. Please note that for complex eigenvalues no order of eigenvalues can be assumed, except // that complex conjugate pairs of eigenvalues appear consecutively with the eigenvalue having // the positive imaginary part first. // // In case \a A is a row-major matrix, \a V will contain the left eigenvectors, otherwise \a V // will contain the right eigenvectors. In case \a V is a row-major matrix the eigenvectors are // returned in the rows of \a V, in case \a V is a column-major matrix the eigenvectors are // returned in the columns of \a V. In case the given matrix is a compile time symmetric matrix // with floating point elements, the resulting eigenvectors will be of floating point type and // therefore the elements of the given eigenvector matrix are expected to be of floating point // type. In all other cases they are expected to be of complex type. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a V is a fixed size matrix and the dimensions don't match; // - ... the eigenvalue computation fails. // // In all failure cases an exception is thrown. 
// // \note All \c eigen() functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions compute the eigenvalues and/or eigenvectors of a dense matrix by means of // LAPACK kernels. Thus the functions can only be used if a fitting LAPACK library is available // and linked to the executable. Otherwise a linker error will be created. // // // \n \section matrix_operations_singularvalues Singular Values/Singular Vectors // <hr> // // The singular value decomposition (SVD) of a dense matrix can be computed via the \c svd() // functions. The following two examples give an impression of the computation of singular values // and singular vectors for a general dense matrix with \c double and \c complex<double> element // type, respectively: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix<double,rowMajor> A( 5UL, 8UL ); // The general matrix A // ... 
Initialization DynamicMatrix<double,rowMajor> U; // The matrix for the left singular vectors DynamicVector<double,columnVector> s; // The vector for the singular values DynamicMatrix<double,rowMajor> V; // The matrix for the right singular vectors s = svd( A ); // (1) Computing only the singular values of A svd( A, s ); // (2) Computing only the singular values of A svd( A, U, s, V ); // (3) Computing the singular values and vectors of A svd( A, s, 0.0, 1.0 ); // (4) Computing all singular values in the floating point range [0.0..1.0) svd( A, U, s, V, 0, 2 ); // (5) Computing the singular values and vectors in the index range [0..2] \endcode \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix<complex<double>,rowMajor> A( 5UL, 8UL ); // The general matrix A // ... Initialization DynamicMatrix<complex<double>,rowMajor> U; // The matrix for the left singular vectors DynamicVector<double,columnVector> s; // The vector for the singular values DynamicMatrix<complex<double>,rowMajor> V; // The matrix for the right singular vectors s = svd( A ); // (1) Computing only the singular values of A svd( A, s ); // (2) Computing only the singular values of A svd( A, U, s, V ); // (3) Computing the singular values and vectors of A svd( A, s, 0.0, 1.0 ); // (4) Computing all singular values in the floating point range [0.0..1.0) svd( A, U, s, V, 0, 2 ); // (5) Computing the singular values and vectors in the index range [0..2] \endcode // Functions (1), (2) and (4) compute only singular values of the given general \a m-by-\a n // matrix, functions (3) and (5) additionally compute singular vectors. The resulting singular // values are returned in the given vector \a s, the left singular vectors are returned in the // given matrix \a U, and the right singular vectors are returned in the matrix \a V. \a s, \a U, // and \a V are resized to the correct dimensions (if possible and necessary). 
// // Functions (4) and (5) allow for the specification of a subset of singular values and/or // vectors. The number of singular values and vectors to be computed is specified by the lower // bound \a low and the upper bound \a upp, which either form an integral or a floating point // range. // // In case \a low and \a upp are of integral type, the function computes all singular values // in the index range \f$[low..upp]\f$. The \a num resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a \a num-dimensional vector. The resulting left singular vectors are stored // in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-\a num matrix. The resulting right singular vectors are stored in the given matrix \a V, // which is either resized (if possible) or expected to be a \a num-by-\a n matrix. // // In case \a low and \a upp are of floating point type, the function computes all singular values // in the half-open interval \f$(low..upp]\f$. The resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a min(\a m,\a n)-dimensional vector. The resulting left singular vectors are // stored in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-min(\a m,\a n) matrix. The resulting right singular vectors are stored in the given // matrix \a V, which is either resized (if possible) or expected to be a min(\a m,\a n)-by-\a n // matrix. // // The functions fail if ... // // - ... the given matrix \a U is a fixed size matrix and the dimensions don't match; // - ... the given vector \a s is a fixed size vector and the size doesn't match; // - ... the given matrix \a V is a fixed size matrix and the dimensions don't match; // - ...
the given scalar values don't form a proper range; // - ... the singular value decomposition fails. // // In all failure cases an exception is thrown. // // \note All \c svd() functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions compute the singular values and/or singular vectors of a dense matrix by // means of LAPACK kernels. Thus the functions can only be used if a fitting LAPACK library is // available and linked to the executable. Otherwise a linker error will be created. // // // \n Previous: \ref matrix_types &nbsp; &nbsp; Next: \ref adaptors */ //************************************************************************************************* //**Adaptors*************************************************************************************** /*!\page adaptors Adaptors // // \tableofcontents // // // \section adaptors_general General Concepts // <hr> // // Adaptors act as wrappers around the general \ref matrix_types. They adapt the interface of the // matrices such that certain invariants are preserved. Due to this adaptors can provide a compile // time guarantee of certain properties, which can be exploited for optimized performance. 
// // The \b Blaze library provides a total of 9 different adaptors: // // <ul> // <li> \ref adaptors_symmetric_matrices </li> // <li> \ref adaptors_hermitian_matrices </li> // <li> \ref adaptors_triangular_matrices // <ul> // <li> \ref adaptors_triangular_matrices "Lower Triangular Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_lowermatrix </li> // <li> \ref adaptors_triangular_matrices_unilowermatrix </li> // <li> \ref adaptors_triangular_matrices_strictlylowermatrix </li> // </ul> // </li> // <li> \ref adaptors_triangular_matrices "Upper Triangular Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_uppermatrix </li> // <li> \ref adaptors_triangular_matrices_uniuppermatrix </li> // <li> \ref adaptors_triangular_matrices_strictlyuppermatrix </li> // </ul> // </li> // <li> \ref adaptors_triangular_matrices "Diagonal Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_diagonalmatrix </li> // </ul> // </li> // </ul> // </li> // </ul> // // In combination with the general matrix types, \b Blaze provides a total of 40 different matrix // types that make it possible to exactly adapt the type of matrix to every specific problem. // // // \n \section adaptors_examples Examples // <hr> // // The following code examples give an impression on the use of adaptors. The first example shows // the multiplication between two lower matrices: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using blaze::columnMajor; LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the // fact that either the lower or upper part of the matrix contains only default elements and // restrict the algorithm to the non-zero elements. 
Thus the adaptor provides a significant // performance advantage in comparison to a general matrix multiplication, especially for large // matrices. // // The second example shows the \c SymmetricMatrix adaptor in a row-major dense matrix/sparse // vector multiplication: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which significantly increases the performance. // // \n Previous: \ref matrix_operations &nbsp; &nbsp; Next: \ref adaptors_symmetric_matrices */ //************************************************************************************************* //**Symmetric Matrices***************************************************************************** /*!\page adaptors_symmetric_matrices Symmetric Matrices // // \tableofcontents // // // \n \section adaptors_symmetric_matrices_general Symmetric Matrices // <hr> // // In contrast to general matrices, which have no restriction in their number of rows and columns // and whose elements can have any value, symmetric matrices provide the compile time guarantee // to be square matrices with pair-wise identical values. 
Mathematically, this means that a // symmetric matrix is always equal to its transpose (\f$ A = A^T \f$) and that all non-diagonal // values have an identical counterpart (\f$ a_{ij} == a_{ji} \f$). This symmetry property can // be exploited to provide higher efficiency and/or lower memory consumption. Within the \b Blaze // library, symmetric matrices are realized by the \ref adaptors_symmetric_matrices_symmetricmatrix // class template. // // // \n \section adaptors_symmetric_matrices_symmetricmatrix SymmetricMatrix // <hr> // // The SymmetricMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it // by enforcing the additional invariant of symmetry (i.e. the matrix is always equal to its // transpose \f$ A = A^T \f$). It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/SymmetricMatrix.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the adapted matrix can be specified via template parameter: \code namespace blaze { template< typename MT > class SymmetricMatrix; } // namespace blaze \endcode // \c MT specifies the type of the matrix to be adapted. SymmetricMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible symmetric matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense symmetric matrix with static memory blaze::SymmetricMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense symmetric matrix based on HybridMatrix blaze::SymmetricMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense symmetric matrix based on DynamicMatrix blaze::SymmetricMatrix< blaze::DynamicMatrix<double,rowMajor> > C; // Definition of a fixed size row-major dense symmetric matrix based on CustomMatrix blaze::SymmetricMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision symmetric matrix blaze::SymmetricMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > E; \endcode // The storage order of a symmetric matrix is depending on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the symmetric matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the symmetric matrix // will also be a column-major matrix. // // // \n \section adaptors_symmetric_matrices_special_properties Special Properties of Symmetric Matrices // <hr> // // A symmetric matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type.
However, there are // some important exceptions resulting from the symmetry constraint: // // -# <b>\ref adaptors_symmetric_matrices_square</b> // -# <b>\ref adaptors_symmetric_matrices_symmetry</b> // -# <b>\ref adaptors_symmetric_matrices_initialization</b> // // \n \subsection adaptors_symmetric_matrices_square Symmetric Matrices Must Always be Square! // // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 symmetric dynamic matrix SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::SymmetricMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 symmetric static matrix SymmetricMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type SymmetricMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_symmetric_matrices_symmetry The Symmetric Property is Always Enforced! // // This means that modifying the element \f$ a_{ij} \f$ of a symmetric matrix also modifies its // counterpart element \f$ a_{ji} \f$. 
Also, it is only possible to assign matrices that are // symmetric themselves: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Default constructed, row-major 3x3 symmetric compressed matrix SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); // Initializing three elements via the function call operator A(0,0) = 1.0; // Initialization of the diagonal element (0,0) A(0,2) = 2.0; // Initialization of the elements (0,2) and (2,0) // Inserting three more elements via the insert() function A.insert( 1, 1, 3.0 ); // Inserting the diagonal element (1,1) A.insert( 1, 2, 4.0 ); // Inserting the elements (1,2) and (2,1) // Access via a non-const iterator *A.begin(1UL) = 10.0; // Modifies both elements (1,0) and (0,1) // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0) // Construction from a symmetric dense matrix StaticMatrix<double,3UL,3UL> B{ { 3.0, 8.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK // Assignment of a non-symmetric dense matrix StaticMatrix<double,3UL,3UL> D{ { 3.0, 7.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; C = D; // Throws an exception; symmetric invariant would be violated! \endcode // The same restriction also applies to the \c append() function for sparse matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix. // Despite the additional insertion, the \c append() function still provides the most efficient // way to set up a symmetric sparse matrix. 
In order to achieve the maximum efficiency, the // capacity of the individual rows/columns of the matrix should be specifically prepared with // \c reserve() calls: \code using blaze::CompressedMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Setup of the symmetric matrix // // ( 0 1 3 ) // A = ( 1 2 0 ) // ( 3 0 0 ) // SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); A.reserve( 5 ); // Reserving enough space for 5 non-zero elements A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row A.append( 0, 1, 1.0 ); // Appending the value 1 at position (0,1) and (1,0) A.append( 1, 1, 2.0 ); // Appending the value 2 at position (1,1) A.append( 2, 0, 3.0 ); // Appending the value 3 at position (2,0) and (0,2) \endcode // The symmetry property is also enforced for symmetric custom matrices: In case the given array // of elements does not represent a symmetric matrix, a \c std::invalid_argument exception is // thrown: \code using blaze::CustomMatrix; using blaze::SymmetricMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using CustomSymmetric = SymmetricMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >; // Creating a 3x3 symmetric custom matrix from a properly initialized array double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 }; CustomSymmetric A( array, 3UL ); // OK // Attempt to create a second 3x3 symmetric custom matrix from an uninitialized array std::unique_ptr<double[]> memory( new double[9UL] ); CustomSymmetric B( memory.get(), 3UL ); // Throws an exception \endcode // Finally, the symmetry property is enforced for views (rows, columns, submatrices, ...) on the // symmetric matrix.
The following example demonstrates that modifying the elements of an entire // row of the symmetric matrix also affects the counterpart elements in the according column of // the matrix: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of the symmetric matrix // // ( 0 1 0 2 ) // A = ( 1 3 4 0 ) // ( 0 4 0 5 ) // ( 2 0 5 0 ) // SymmetricMatrix< DynamicMatrix<int> > A( 4 ); A(0,1) = 1; A(0,3) = 2; A(1,1) = 3; A(1,2) = 4; A(2,3) = 5; // Setting all elements in the 1st row to 0 results in the matrix // // ( 0 0 0 2 ) // A = ( 0 0 0 0 ) // ( 0 0 0 5 ) // ( 2 0 5 0 ) // row( A, 1 ) = 0; \endcode // The next example demonstrates the (compound) assignment to submatrices of symmetric matrices. // Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry // of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is // thrown: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of two default 4x4 symmetric matrices SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; // OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved! // The elements marked with X cannot be assigned unambiguously! // // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_symmetric_matrices_initialization The Elements of a Dense Symmetric Matrix are Always Default Initialized! 
// // Although this results in a small loss of efficiency (especially in case all default values are // overridden afterwards), this property is important since otherwise the symmetric property of // dense symmetric matrices could not be guaranteed: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // Default initialized, 5x5 row-major symmetric dynamic matrix SymmetricMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); \endcode // \n \section adaptors_symmetric_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A SymmetricMatrix matrix can participate in numerical operations in any way any other dense // or sparse matrix can participate. It can also be combined with any other dense or sparse vector // or matrix. The following code example gives an impression of the use of SymmetricMatrix within // arithmetic operations: \code using blaze::SymmetricMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; DynamicMatrix<double,rowMajor> A( 3, 3 ); CompressedMatrix<double,rowMajor> B( 3, 3 ); SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( 3 ); SymmetricMatrix< CompressedMatrix<double,rowMajor> > D( 3 ); SymmetricMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E; SymmetricMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major symmetric matrix (includes runtime check) F = C - D; // Matrix subtraction and assignment to a column-major symmetric matrix (only compile time check) F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check) C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B (includes runtime check) F = C * 2.0; // Scaling of matrix C (only compile time check) E += A - B; // Addition assignment (includes runtime 
check) F -= C + D; // Subtraction assignment (only compile time check) F *= A * D; // Multiplication assignment (includes runtime check) \endcode // Note that it is possible to assign any kind of matrix to a symmetric matrix. In case the matrix // to be assigned is not symmetric at compile time, a runtime check is performed. // // // \n \section adaptors_symmetric_matrices_block_matrices Symmetric Block Matrices // <hr> // // It is also possible to use symmetric block matrices: \code using blaze::CompressedMatrix; using blaze::StaticMatrix; using blaze::SymmetricMatrix; // Definition of a 3x3 symmetric block matrix based on CompressedMatrix SymmetricMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > A( 3 ); \endcode // Also in this case, the SymmetricMatrix class template enforces the invariant of symmetry and // guarantees that a modification of element \f$ a_{ij} \f$ of the adapted matrix is also // applied to element \f$ a_{ji} \f$: \code // Inserting the elements (2,4) and (4,2) A.insert( 2, 4, StaticMatrix<int,3UL,3UL>{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, -1, 2 } } ); // Manipulating the elements (2,4) and (4,2) A(2,4)(1,1) = -5; \endcode // For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices. // // // \n \section adaptors_symmetric_matrices_performance Performance Considerations // <hr> // // When the symmetric property of a matrix is known beforehand, using the SymmetricMatrix adaptor // instead of a general matrix can be a considerable performance advantage. The \b Blaze library // tries to exploit the properties of symmetric matrices whenever possible. However, there are // also situations when using a symmetric matrix introduces some overhead. The following examples // demonstrate several situations where symmetric matrices can positively or negatively impact // performance.
// // \n \subsection adaptors_symmetric_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact // that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the // multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix // multiplication: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; using blaze::columnMajor; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; SymmetricMatrix< CompressedMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited // for maximum performance. However, \b Blaze evaluates the multiplication as \code C = A * trans( B ); \endcode // which significantly increases the performance since in contrast to the original formulation the // optimized form can be vectorized. Therefore, in the context of matrix multiplications, using the // SymmetricMatrix adapter is obviously an advantage. // // \n \subsection adaptors_symmetric_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar optimization is possible in case of matrix/vector multiplications: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. 
Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which also significantly increases the performance. // // \n \subsection adaptors_symmetric_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices // // Another example is the optimization of a row view on a column-major symmetric matrix: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::columnMajor; SymmetricMatrix< DynamicMatrix<double,columnMajor> > A( 10UL ); auto row5 = row( A, 5UL ); \endcode // Usually, a row view on a column-major matrix results in a considerable performance decrease in // comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix // elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of // the matrix, which provides the same performance as if the matrix would be row-major. Note that // this also works for column views on row-major matrices, where \b Blaze can use the according // row instead of a column in order to provide maximum performance. // // \n \subsection adaptors_symmetric_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a symmetric matrix on the right-hand side of an assignment (i.e. for read // access), which introduces absolutely no performance penalty, using a symmetric matrix on the // left-hand side of an assignment (i.e. 
for write access) may introduce additional overhead when // it is assigned a general matrix, which is not symmetric at compile time: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; SymmetricMatrix< DynamicMatrix<double> > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the symmetric matrix; no performance penalty C = A; // Assignment of a symmetric matrix to another symmetric matrix; no runtime overhead C = B; // Assignment of a general matrix to a symmetric matrix; some runtime overhead \endcode // When assigning a general, potentially not symmetric matrix to a symmetric matrix it is necessary // to check whether the matrix is symmetric at runtime in order to guarantee the symmetry property // of the symmetric matrix. In case it turns out to be symmetric, it is assigned as efficiently as // possible, if it is not, an exception is thrown. In order to prevent this runtime overhead it is // therefore generally advisable to assign symmetric matrices to other symmetric matrices.\n // In this context it is especially noteworthy that in contrast to additions and subtractions the // multiplication of two symmetric matrices does not necessarily result in another symmetric matrix: \code SymmetricMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a symmetric matrix; no runtime overhead C = A - B; // Results in a symmetric matrix; no runtime overhead C = A * B; // Is not guaranteed to result in a symmetric matrix; some runtime overhead \endcode // \n Previous: \ref adaptors &nbsp; &nbsp; Next: \ref adaptors_hermitian_matrices */ //************************************************************************************************* //**Hermitian Matrices***************************************************************************** /*!\page adaptors_hermitian_matrices Hermitian Matrices // // \tableofcontents // // // \n \section adaptors_hermitian_matrices_general Hermitian Matrices // <hr> // // In addition to symmetric matrices, \b 
Blaze also provides an adaptor for Hermitian matrices. // Hermitian matrices provide the compile time guarantee to be square matrices with pair-wise // conjugate complex values. Mathematically, this means that an Hermitian matrix is always equal // to its conjugate transpose (\f$ A = \overline{A^T} \f$) and that all non-diagonal values have // a complex conjugate counterpart (\f$ a_{ij} == \overline{a_{ji}} \f$). Within the \b Blaze // library, Hermitian matrices are realized by the \ref adaptors_hermitian_matrices_hermitianmatrix // class template. // // // \n \section adaptors_hermitian_matrices_hermitianmatrix HermitianMatrix // <hr> // // The HermitianMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant of Hermitian symmetry (i.e. the matrix is always equal to // its conjugate transpose \f$ A = \overline{A^T} \f$). It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/HermitianMatrix.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the adapted matrix can be specified via template parameter: \code namespace blaze { template< typename MT > class HermitianMatrix; } // namespace blaze \endcode // \c MT specifies the type of the matrix to be adapted. HermitianMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible Hermitian matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense Hermitian matrix with static memory blaze::HermitianMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense Hermitian matrix based on HybridMatrix blaze::HermitianMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense Hermitian matrix based on DynamicMatrix blaze::HermitianMatrix< blaze::DynamicMatrix<std::complex<double>,rowMajor> > C; // Definition of a fixed size row-major dense Hermitian matrix based on CustomMatrix blaze::HermitianMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision complex Hermitian matrix blaze::HermitianMatrix< blaze::CompressedMatrix<std::complex<float>,rowMajor> > E; \endcode // The storage order of an Hermitian matrix is depending on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the Hermitian matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the Hermitian matrix // will also be a column-major matrix. // // // \n \section adaptors_hermitian_matrices_vs_symmetric_matrices Hermitian Matrices vs. Symmetric Matrices // // The blaze::HermitianMatrix adaptor and the blaze::SymmetricMatrix adaptor share several traits. // However, there are a couple of differences, both from a mathematical point of view as well as // from an implementation point of view. 
// // From a mathematical point of view, a matrix is called symmetric when it is equal to its // transpose (\f$ A = A^T \f$) and it is called Hermitian when it is equal to its conjugate // transpose (\f$ A = \overline{A^T} \f$). For matrices of real values, however, these two // conditions coincide, which means that symmetric matrices of real values are also Hermitian // and Hermitian matrices of real values are also symmetric. // // From an implementation point of view, \b Blaze restricts Hermitian matrices to numeric data // types (i.e. all integral types except \c bool, floating point and complex types), whereas // symmetric matrices can also be block matrices (i.e. can have vector or matrix elements). // For built-in element types, the HermitianMatrix adaptor behaves exactly like the according // SymmetricMatrix implementation. For complex element types, however, the Hermitian property // is enforced (see also \ref adaptors_hermitian_matrices_hermitian). \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::HermitianMatrix; using blaze::SymmetricMatrix; // The following two matrices provide an identical experience (including performance) HermitianMatrix< DynamicMatrix<double> > A; // Both Hermitian and symmetric SymmetricMatrix< DynamicMatrix<double> > B; // Both Hermitian and symmetric // The following two matrices will behave differently HermitianMatrix< DynamicMatrix< complex<double> > > C; // Only Hermitian SymmetricMatrix< DynamicMatrix< complex<double> > > D; // Only symmetric // Hermitian block matrices are not allowed HermitianMatrix< DynamicMatrix< DynamicVector<double> > > E; // Compilation error! SymmetricMatrix< DynamicMatrix< DynamicVector<double> > > F; // Symmetric block matrix \endcode // \n \section adaptors_hermitian_matrices_special_properties Special Properties of Hermitian Matrices // <hr> // // An Hermitian matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. 
// It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the Hermitian symmetry constraint: // // -# <b>\ref adaptors_hermitian_matrices_square</b> // -# <b>\ref adaptors_hermitian_matrices_hermitian</b> // -# <b>\ref adaptors_hermitian_matrices_initialization</b> // // \n \subsection adaptors_hermitian_matrices_square Hermitian Matrices Must Always be Square! // // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 Hermitian dynamic matrix HermitianMatrix< DynamicMatrix<std::complex<double>,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::HermitianMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 Hermitian static matrix HermitianMatrix< StaticMatrix<std::complex<float>,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type HermitianMatrix< StaticMatrix<std::complex<float>,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_hermitian_matrices_hermitian The Hermitian Property is Always Enforced! 
// // This means that the following properties of an Hermitian matrix are always guaranteed: // // - The diagonal elements are real numbers, i.e. the imaginary part is zero // - Element \f$ a_{ij} \f$ is always the complex conjugate of element \f$ a_{ji} \f$ // // Thus modifying the element \f$ a_{ij} \f$ of an Hermitian matrix also modifies its // counterpart element \f$ a_{ji} \f$. Also, it is only possible to assign matrices that // are Hermitian themselves: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; using cplx = std::complex<double>; // Default constructed, row-major 3x3 Hermitian compressed matrix HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 ); // Initializing the matrix via the function call operator // // ( (1, 0) (0,0) (2,1) ) // ( (0, 0) (0,0) (0,0) ) // ( (2,-1) (0,0) (0,0) ) // A(0,0) = cplx( 1.0, 0.0 ); // Initialization of the diagonal element (0,0) A(0,2) = cplx( 2.0, 1.0 ); // Initialization of the elements (0,2) and (2,0) // Inserting three more elements via the insert() function // // ( (1, 0) (0,0) (2, 1) ) // ( (0, 0) (2,0) (4,-2) ) // ( (2,-1) (4,2) (0, 0) ) // A.insert( 1, 1, cplx( 2.0, 0.0 ) ); // Inserting the diagonal element (1,1) A.insert( 1, 2, cplx( 4.0, -2.0 ) ); // Inserting the elements (1,2) and (2,1) // Access via a non-const iterator // // ( (1, 0) (8,1) (2, 1) ) // ( (8,-1) (2,0) (4,-2) ) // ( (2,-1) (4,2) (0, 0) ) // *A.begin(1UL) = cplx( 8.0, -1.0 ); // Modifies both elements (1,0) and (0,1) // Erasing elements via the erase() function // // ( (0, 0) (8,1) (0, 0) ) // ( (8,-1) (2,0) (4,-2) ) // ( (0, 0) (4,2) (0, 0) ) // A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0) // Construction from an Hermitian dense matrix StaticMatrix<cplx,3UL,3UL> B{ { cplx( 3.0, 0.0 ), cplx( 8.0, 2.0 ), cplx( -2.0, 2.0 ) }, { cplx( 8.0, -2.0 ), cplx( 0.0, 0.0 ), cplx( -1.0, -1.0 
) }, { cplx( -2.0, -2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } }; HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( B ); // OK // Assignment of a non-Hermitian dense matrix StaticMatrix<cplx,3UL,3UL> D{ { cplx( 3.0, 0.0 ), cplx( 7.0, 2.0 ), cplx( 3.0, 2.0 ) }, { cplx( 8.0, 1.0 ), cplx( 0.0, 0.0 ), cplx( 6.0, 4.0 ) }, { cplx( -2.0, 2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } }; C = D; // Throws an exception; Hermitian invariant would be violated! \endcode // The same restriction also applies to the \c append() function for sparse matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix. // Despite the additional insertion, the \c append() function still provides the most efficient // way to set up an Hermitian sparse matrix. In order to achieve the maximum efficiency, the // capacity of the individual rows/columns of the matrix should be specifically prepared with // \c reserve() calls: \code using blaze::CompressedMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; using cplx = std::complex<double>; // Setup of the Hermitian matrix // // ( (0, 0) (1,2) (3,-4) ) // A = ( (1,-2) (2,0) (0, 0) ) // ( (3, 4) (0,0) (0, 0) ) // HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 ); A.reserve( 5 ); // Reserving enough space for 5 non-zero elements A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row A.append( 0, 1, cplx( 1.0, 2.0 ) ); // Appending an element at position (0,1) and (1,0) A.append( 1, 1, cplx( 2.0, 0.0 ) ); // Appending an element at position (1,1) A.append( 2, 0, cplx( 3.0, 4.0 ) ); // Appending an element at position (2,0) and (0,2) \endcode // The Hermitian property is also enforced for Hermitian custom matrices: In case the given array // of elements does not represent an Hermitian matrix, a \c std::invalid_argument 
exception is // thrown: \code using blaze::CustomMatrix; using blaze::HermitianMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using CustomHermitian = HermitianMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >; // Creating a 3x3 Hermitian custom matrix from a properly initialized array double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 }; CustomHermitian A( array, 3UL ); // OK // Attempt to create a second 3x3 Hermitian custom matrix from an uninitialized array std::unique_ptr<double[]> memory( new double[9UL] ); CustomHermitian B( memory.get(), 3UL ); // Throws an exception \endcode // Finally, the Hermitian property is enforced for views (rows, columns, submatrices, ...) on the // Hermitian matrix. The following example demonstrates that modifying the elements of an entire // row of the Hermitian matrix also affects the counterpart elements in the according column of // the matrix: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using cplx = std::complex<double>; // Setup of the Hermitian matrix // // ( (0, 0) (1,-1) (0,0) (2, 1) ) // A = ( (1, 1) (3, 0) (4,2) (0, 0) ) // ( (0, 0) (4,-2) (0,0) (5,-3) ) // ( (2,-1) (0, 0) (5,3) (0, 0) ) // HermitianMatrix< DynamicMatrix<cplx> > A( 4 ); A(0,1) = cplx( 1.0, -1.0 ); A(0,3) = cplx( 2.0, 1.0 ); A(1,1) = cplx( 3.0, 0.0 ); A(1,2) = cplx( 4.0, 2.0 ); A(2,3) = cplx( 5.0, 3.0 ); // Setting all elements in the 1st row to 0 results in the matrix // // ( (0, 0) (0,0) (0,0) (2, 1) ) // A = ( (0, 0) (0,0) (0,0) (0, 0) ) // ( (0, 0) (0,0) (0,0) (5,-3) ) // ( (2,-1) (0,0) (5,3) (0, 0) ) // row( A, 1 ) = cplx( 0.0, 0.0 ); \endcode // The next example demonstrates the (compound) assignment to submatrices of Hermitian matrices. // Since the modification of element \f$ a_{ij} \f$ of an Hermitian matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the Hermitian // symmetry of the matrix is preserved. 
Otherwise a \c std::invalid_argument exception is thrown: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using cplx = std::complex<double>; // Setup of two default 4x4 Hermitian matrices HermitianMatrix< DynamicMatrix<cplx> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( (1,-1) (2, 5) ) // B = ( (3, 0) (4,-6) ) // ( (5, 0) (6, 0) ) // DynamicMatrix<cplx> B( 3UL, 2UL ); B(0,0) = cplx( 1.0, -1.0 ); B(0,1) = cplx( 2.0, 5.0 ); B(1,0) = cplx( 3.0, 0.0 ); B(1,1) = cplx( 4.0, -6.0 ); B(2,0) = cplx( 5.0, 0.0 ); B(2,1) = cplx( 6.0, 0.0 ); // OK: Assigning B to a submatrix of A1 such that the Hermitian property is preserved // // ( (0, 0) (0, 0) (1,-1) (2, 5) ) // A1 = ( (0, 0) (0, 0) (3, 0) (4,-6) ) // ( (1, 1) (3, 0) (5, 0) (6, 0) ) // ( (2,-5) (4, 6) (6, 0) (0, 0) ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the Hermitian property isn't preserved! // The elements marked with X cannot be assigned unambiguously! // // ( (0, 0) (1,-1) (2,5) (0,0) ) // A2 = ( (1, 1) (3, 0) (X,X) (0,0) ) // ( (2,-5) (X, X) (6,0) (0,0) ) // ( (0, 0) (0, 0) (0,0) (0,0) ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_hermitian_matrices_initialization The Elements of a Dense Hermitian Matrix are Always Default Initialized! 
// // Although this results in a small loss of efficiency (especially in case all default values are // overridden afterwards), this property is important since otherwise the Hermitian property of // dense Hermitian matrices could not be guaranteed: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // Default initialized, 5x5 row-major Hermitian dynamic matrix HermitianMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); \endcode // \n \section adaptors_hermitian_matrices_arithmetic_operations Arithmetic Operations // <hr> // // An HermitianMatrix can be used within all numerical operations in any way any other dense or // sparse matrix can be used. It can also be combined with any other dense or sparse vector or // matrix. The following code example gives an impression of the use of HermitianMatrix within // arithmetic operations: \code using blaze::HermitianMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; using cplx = complex<float>; DynamicMatrix<cplx,rowMajor> A( 3, 3 ); CompressedMatrix<cplx,rowMajor> B( 3, 3 ); HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( 3 ); HermitianMatrix< CompressedMatrix<cplx,rowMajor> > D( 3 ); HermitianMatrix< HybridMatrix<cplx,3UL,3UL,rowMajor> > E; HermitianMatrix< StaticMatrix<cplx,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major Hermitian matrix (includes runtime check) F = C - D; // Matrix subtraction and assignment to a column-major Hermitian matrix (only compile time check) F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check) C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B (includes runtime check) F = C * 2.0; // Scaling of matrix C (only compile time check) E += A - B; // Addition assignment (includes 
runtime check) F -= C + D; // Subtraction assignment (only compile time check) F *= A * D; // Multiplication assignment (includes runtime check) \endcode // Note that it is possible to assign any kind of matrix to an Hermitian matrix. In case the matrix // to be assigned is not Hermitian at compile time, a runtime check is performed. // // // \n \section adaptors_hermitian_matrices_performance Performance Considerations // <hr> // // When the Hermitian property of a matrix is known beforehands using the HermitianMatrix adaptor // instead of a general matrix can be a considerable performance advantage. This is particularly // true in case the Hermitian matrix is also symmetric (i.e. has built-in element types). The // \b Blaze library tries to exploit the properties of Hermitian (symmetric) matrices whenever // possible. However, there are also situations when using an Hermitian matrix introduces some // overhead. The following examples demonstrate several situations where Hermitian matrices can // positively or negatively impact performance. // // \n \subsection adaptors_hermitian_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact // that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the // multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix // multiplication: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; using blaze::columnMajor; HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Both Hermitian and symmetric HermitianMatrix< CompressedMatrix<double,columnMajor> > B; // Both Hermitian and symmetric DynamicMatrix<double,columnMajor> C; // ... 
Resizing and initialization C = A * B; \endcode // Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited // for maximum performance. However, \b Blaze evaluates the multiplication as \code C = A * trans( B ); \endcode // which significantly increases the performance since in contrast to the original formulation the // optimized form can be vectorized. Therefore, in the context of matrix multiplications, using a // symmetric matrix is obviously an advantage. // // \n \subsection adaptors_hermitian_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar optimization is possible in case of matrix/vector multiplications: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::HermitianMatrix; using blaze::rowMajor; using blaze::columnVector; HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Hermitian and symmetric CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which also significantly increases the performance. 
// // \n \subsection adaptors_hermitian_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices // // Another example is the optimization of a row view on a column-major symmetric matrix: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::columnMajor; HermitianMatrix< DynamicMatrix<double,columnMajor> > A( 10UL ); // Both Hermitian and symmetric auto row5 = row( A, 5UL ); \endcode // Usually, a row view on a column-major matrix results in a considerable performance decrease in // comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix // elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of // the matrix, which provides the same performance as if the matrix would be row-major. Note that // this also works for column views on row-major matrices, where \b Blaze can use the according // row instead of a column in order to provide maximum performance. // // \n \subsection adaptors_hermitian_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using an Hermitian matrix on the right-hand side of an assignment (i.e. for read // access), which introduces absolutely no performance penalty, using an Hermitian matrix on the // left-hand side of an assignment (i.e. 
for write access) may introduce additional overhead when // it is assigned a general matrix, which is not Hermitian at compile time: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; HermitianMatrix< DynamicMatrix< complex<double> > > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the Hermitian matrix; no performance penalty C = A; // Assignment of an Hermitian matrix to another Hermitian matrix; no runtime overhead C = B; // Assignment of a general matrix to an Hermitian matrix; some runtime overhead \endcode // When assigning a general, potentially not Hermitian matrix to an Hermitian matrix it is necessary // to check whether the matrix is Hermitian at runtime in order to guarantee the Hermitian property // of the Hermitian matrix. In case it turns out to be Hermitian, it is assigned as efficiently as // possible, if it is not, an exception is thrown. In order to prevent this runtime overhead it is // therefore generally advisable to assign Hermitian matrices to other Hermitian matrices.\n // In this context it is especially noteworthy that in contrast to additions and subtractions the // multiplication of two Hermitian matrices does not necessarily result in another Hermitian matrix: \code HermitianMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in an Hermitian matrix; no runtime overhead C = A - B; // Results in an Hermitian matrix; no runtime overhead C = A * B; // Is not guaranteed to result in an Hermitian matrix; some runtime overhead \endcode // \n Previous: \ref adaptors_symmetric_matrices &nbsp; &nbsp; Next: \ref adaptors_triangular_matrices */ //************************************************************************************************* //**Triangular Matrices**************************************************************************** /*!\page adaptors_triangular_matrices Triangular Matrices // // \tableofcontents // // // \n \section adaptors_triangular_matrices_general Triangular Matrices // <hr> // // 
Triangular matrices come in three flavors: Lower triangular matrices provide the compile time // guarantee to be square matrices and that the upper part of the matrix contains only default // elements that cannot be modified. Upper triangular matrices on the other hand provide the // compile time guarantee to be square and that the lower part of the matrix contains only fixed // default elements. Finally, diagonal matrices provide the compile time guarantee to be square // and that both the lower and upper part of the matrix contain only immutable default elements. // These properties can be exploited to gain higher performance and/or to save memory. Within the // \b Blaze library, several kinds of lower and upper triangular and diagonal matrices are realized // by the following class templates: // // Lower triangular matrices: // - <b>\ref adaptors_triangular_matrices_lowermatrix</b> // - <b>\ref adaptors_triangular_matrices_unilowermatrix</b> // - <b>\ref adaptors_triangular_matrices_strictlylowermatrix</b> // // Upper triangular matrices: // - <b>\ref adaptors_triangular_matrices_uppermatrix</b> // - <b>\ref adaptors_triangular_matrices_uniuppermatrix</b> // - <b>\ref adaptors_triangular_matrices_strictlyuppermatrix</b> // // Diagonal matrices // - <b>\ref adaptors_triangular_matrices_diagonalmatrix</b> // // // \n \section adaptors_triangular_matrices_lowermatrix LowerMatrix // <hr> // // The blaze::LowerMatrix class template is an adapter for existing dense and sparse matrix types. 
// It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant that all matrix elements above the diagonal are 0 (lower // triangular matrix): \f[\left(\begin{array}{*{5}{c}} l_{0,0} & 0 & 0 & \cdots & 0 \\ l_{1,0} & l_{1,1} & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & l_{2,2} & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & l_{N,N} \\ \end{array}\right).\f] // It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/LowerMatrix.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code namespace blaze { template< typename MT > class LowerMatrix; } // namespace blaze \endcode // \c MT specifies the type of the matrix to be adapted. blaze::LowerMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible lower matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense lower matrix with static memory blaze::LowerMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense lower matrix based on HybridMatrix blaze::LowerMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense lower matrix based on DynamicMatrix blaze::LowerMatrix< blaze::DynamicMatrix<double,rowMajor> > C; // Definition of a fixed size row-major dense lower matrix based on CustomMatrix blaze::LowerMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision lower matrix blaze::LowerMatrix< blaze::CompressedMatrix<float,rowMajor> > E; \endcode // The storage order of a lower matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified // as blaze::rowMajor), the lower matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the lower matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_unilowermatrix UniLowerMatrix // <hr> // // The blaze::UniLowerMatrix class template is an adapter for existing dense and sparse matrix // types.
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix // elements above the diagonal are 0 (lower unitriangular matrix): \f[\left(\begin{array}{*{5}{c}} 1 & 0 & 0 & \cdots & 0 \\ l_{1,0} & 1 & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & 1 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 1 \\ \end{array}\right).\f] // It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/UniLowerMatrix.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code namespace blaze { template< typename MT > class UniLowerMatrix; } // namespace blaze \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UniLowerMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible lower unitriangular matrices: \code // Definition of a 3x3 row-major dense unilower matrix with static memory blaze::UniLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense unilower matrix based on HybridMatrix blaze::UniLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense unilower matrix based on DynamicMatrix blaze::UniLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision unilower matrix blaze::UniLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a lower unitriangular matrix depends on the storage order of the // adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the unilower matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the unilower matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_strictlylowermatrix StrictlyLowerMatrix // <hr> // // The blaze::StrictlyLowerMatrix class template is an adapter for existing dense and sparse matrix // types.
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements and all matrix // elements above the diagonal are 0 (strictly lower triangular matrix): \f[\left(\begin{array}{*{5}{c}} 0 & 0 & 0 & \cdots & 0 \\ l_{1,0} & 0 & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & 0 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 0 \\ \end{array}\right).\f] // It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/StrictlyLowerMatrix.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code namespace blaze { template< typename MT > class StrictlyLowerMatrix; } // namespace blaze \endcode // \c MT specifies the type of the matrix to be adapted. blaze::StrictlyLowerMatrix can be used // with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix // type. Note that the given matrix type must be either resizable (as for instance // blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance // blaze::StaticMatrix). 
// // The following examples give an impression of several possible strictly lower triangular matrices: \code // Definition of a 3x3 row-major dense strictly lower matrix with static memory blaze::StrictlyLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense strictly lower matrix based on HybridMatrix blaze::StrictlyLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense strictly lower matrix based on DynamicMatrix blaze::StrictlyLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision strictly lower matrix blaze::StrictlyLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a strictly lower triangular matrix depends on the storage order of // the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the strictly lower matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the strictly lower matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_uppermatrix UpperMatrix // <hr> // // The blaze::UpperMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant that all matrix elements below the diagonal are 0 (upper // triangular matrix): \f[\left(\begin{array}{*{5}{c}} u_{0,0} & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & u_{1,1} & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & u_{2,2} & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & u_{N,N} \\ \end{array}\right).\f] // It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/UpperMatrix.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code namespace blaze { template< typename MT > class UpperMatrix; } // namespace blaze \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UpperMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible upper matrices: \code // Definition of a 3x3 row-major dense upper matrix with static memory blaze::UpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense upper matrix based on HybridMatrix blaze::UpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense upper matrix based on DynamicMatrix blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision upper matrix blaze::UpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of an upper matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified // as blaze::rowMajor), the upper matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the upper matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_uniuppermatrix UniUpperMatrix // <hr> // // The blaze::UniUpperMatrix class template is an adapter for existing dense and sparse matrix // types.
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix // elements below the diagonal are 0 (upper unitriangular matrix): \f[\left(\begin{array}{*{5}{c}} 1 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & 1 & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & 1 & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & 1 \\ \end{array}\right).\f] // It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/UniUpperMatrix.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code namespace blaze { template< typename MT > class UniUpperMatrix; } // namespace blaze \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UniUpperMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible upper unitriangular matrices: \code // Definition of a 3x3 row-major dense uniupper matrix with static memory blaze::UniUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense uniupper matrix based on HybridMatrix blaze::UniUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense uniupper matrix based on DynamicMatrix blaze::UniUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision uniupper matrix blaze::UniUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of an upper unitriangular matrix depends on the storage order of the // adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the uniupper matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the uniupper matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_strictlyuppermatrix StrictlyUpperMatrix // <hr> // // The blaze::StrictlyUpperMatrix class template is an adapter for existing dense and sparse matrix // types.
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements and all matrix // elements below the diagonal are 0 (strictly upper triangular matrix): \f[\left(\begin{array}{*{5}{c}} 0 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & 0 & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & 0 & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & 0 \\ \end{array}\right).\f] // It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/StrictlyUpperMatrix.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code namespace blaze { template< typename MT > class StrictlyUpperMatrix; } // namespace blaze \endcode // \c MT specifies the type of the matrix to be adapted. blaze::StrictlyUpperMatrix can be used // with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix // type. Note that the given matrix type must be either resizable (as for instance // blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance // blaze::StaticMatrix). 
// // The following examples give an impression of several possible strictly upper triangular matrices: \code // Definition of a 3x3 row-major dense strictly upper matrix with static memory blaze::StrictlyUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense strictly upper matrix based on HybridMatrix blaze::StrictlyUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense strictly upper matrix based on DynamicMatrix blaze::StrictlyUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision strictly upper matrix blaze::StrictlyUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a strictly upper triangular matrix depends on the storage order of // the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the strictly upper matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the strictly upper matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_diagonalmatrix DiagonalMatrix // <hr> // // The blaze::DiagonalMatrix class template is an adapter for existing dense and sparse matrix // types.
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all matrix elements above and below the diagonal // are 0 (diagonal matrix): \f[\left(\begin{array}{*{5}{c}} l_{0,0} & 0 & 0 & \cdots & 0 \\ 0 & l_{1,1} & 0 & \cdots & 0 \\ 0 & 0 & l_{2,2} & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & l_{N,N} \\ \end{array}\right).\f] // It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/DiagonalMatrix.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code namespace blaze { template< typename MT > class DiagonalMatrix; } // namespace blaze \endcode // \c MT specifies the type of the matrix to be adapted. blaze::DiagonalMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible diagonal matrices: \code // Definition of a 3x3 row-major dense diagonal matrix with static memory blaze::DiagonalMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense diagonal matrix based on HybridMatrix blaze::DiagonalMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense diagonal matrix based on DynamicMatrix blaze::DiagonalMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision diagonal matrix blaze::DiagonalMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a diagonal matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified // as blaze::rowMajor), the diagonal matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the diagonal matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_special_properties Special Properties of Triangular Matrices // <hr> // // A triangular matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the triangular matrix constraint: // // -# <b>\ref adaptors_triangular_matrices_square</b> // -# <b>\ref adaptors_triangular_matrices_triangular</b> // -# <b>\ref adaptors_triangular_matrices_initialization</b> // -# <b>\ref adaptors_triangular_matrices_storage</b> // -# <b>\ref adaptors_triangular_matrices_scaling</b> // // \n \subsection adaptors_triangular_matrices_square Triangular Matrices Must Always be Square!
// // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 lower dynamic matrix LowerMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 lower static matrix LowerMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type LowerMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_triangular_matrices_triangular The Triangular Property is Always Enforced! // // This means that it is only allowed to modify elements in the lower part or the diagonal of // a lower triangular matrix and in the upper part or the diagonal of an upper triangular matrix. // Unitriangular and strictly triangular matrices are even more restrictive and don't allow the // modification of diagonal elements. Also, triangular matrices can only be assigned matrices that // don't violate their triangular property. The following example demonstrates this restriction // by means of the blaze::LowerMatrix adaptor. For examples with other triangular matrix types // see the according class documentations. 
\code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using CompressedLower = LowerMatrix< CompressedMatrix<double,rowMajor> >; // Default constructed, row-major 3x3 lower compressed matrix CompressedLower A( 3 ); // Initializing elements via the function call operator A(0,0) = 1.0; // Initialization of the diagonal element (0,0) A(2,0) = 2.0; // Initialization of the lower element (2,0) A(1,2) = 9.0; // Throws an exception; invalid modification of upper element // Inserting two more elements via the insert() function A.insert( 1, 0, 3.0 ); // Inserting the lower element (1,0) A.insert( 2, 1, 4.0 ); // Inserting the lower element (2,1) A.insert( 0, 2, 9.0 ); // Throws an exception; invalid insertion of upper element // Appending an element via the append() function A.reserve( 1, 3 ); // Reserving enough capacity in row 1 A.append( 1, 1, 5.0 ); // Appending the diagonal element (1,1) A.append( 1, 2, 9.0 ); // Throws an exception; appending an element in the upper part // Access via a non-const iterator CompressedLower::Iterator it = A.begin(1); *it = 6.0; // Modifies the lower element (1,0) ++it; *it = 9.0; // Modifies the diagonal element (1,1) // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 2, 0 ); // Erasing the lower element (2,0) // Construction from a lower dense matrix StaticMatrix<double,3UL,3UL> B{ { 3.0, 0.0, 0.0 }, { 8.0, 0.0, 0.0 }, { -2.0, -1.0, 4.0 } }; LowerMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK // Assignment of a non-lower dense matrix StaticMatrix<double,3UL,3UL> D{ { 3.0, 0.0, -2.0 }, { 8.0, 0.0, 0.0 }, { -2.0, -1.0, 4.0 } }; C = D; // Throws an exception; lower matrix invariant would be violated! 
\endcode // The triangular property is also enforced during the construction of triangular custom matrices: // In case the given array of elements does not represent the according triangular matrix type, a // \c std::invalid_argument exception is thrown: \code using blaze::CustomMatrix; using blaze::LowerMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using CustomLower = LowerMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >; // Creating a 3x3 lower custom matrix from a properly initialized array double array[9] = { 1.0, 0.0, 0.0, 2.0, 3.0, 0.0, 4.0, 5.0, 6.0 }; CustomLower A( array, 3UL ); // OK // Attempt to create a second 3x3 lower custom matrix from an uninitialized array std::unique_ptr<double[]> memory( new double[9UL] ); CustomLower B( memory.get(), 3UL ); // Throws an exception \endcode // Finally, the triangular matrix property is enforced for views (rows, columns, submatrices, ...) // on the triangular matrix. The following example demonstrates that modifying the elements of an // entire row and submatrix of a lower matrix only affects the lower and diagonal matrix elements. // Again, this example uses blaze::LowerMatrix, for examples with other triangular matrix types // see the according class documentations. 
\code using blaze::DynamicMatrix; using blaze::LowerMatrix; // Setup of the lower matrix // // ( 0 0 0 0 ) // A = ( 1 2 0 0 ) // ( 0 3 0 0 ) // ( 4 0 5 0 ) // LowerMatrix< DynamicMatrix<int> > A( 4 ); A(1,0) = 1; A(1,1) = 2; A(2,1) = 3; A(3,0) = 4; A(3,2) = 5; // Setting the lower and diagonal elements in the 2nd row to 9 results in the matrix // // ( 0 0 0 0 ) // A = ( 1 2 0 0 ) // ( 9 9 9 0 ) // ( 4 0 5 0 ) // row( A, 2 ) = 9; // Setting the lower and diagonal elements in the 1st and 2nd column to 7 results in // // ( 0 0 0 0 ) // A = ( 1 7 0 0 ) // ( 9 7 7 0 ) // ( 4 7 7 0 ) // submatrix( A, 0, 1, 4, 2 ) = 7; \endcode // The next example demonstrates the (compound) assignment to rows/columns and submatrices of // triangular matrices. Since only lower/upper and potentially diagonal elements may be modified // the matrix to be assigned must be structured such that the triangular matrix invariant of the // matrix is preserved. Otherwise a \c std::invalid_argument exception is thrown: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::LowerMatrix; using blaze::rowVector; // Setup of two default 4x4 lower matrices LowerMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of a 4-dimensional vector // // v = ( 1 2 3 0 ) // DynamicVector<int,rowVector> v{ 1, 2, 3, 0 }; // OK: Assigning v to the 2nd row of A1 preserves the lower matrix invariant // // ( 0 0 0 0 ) // A1 = ( 0 0 0 0 ) // ( 1 2 3 0 ) // ( 0 0 0 0 ) // row( A1, 2 ) = v; // OK // Error: Assigning v to the 1st row of A1 violates the lower matrix invariant! The element // marked with X cannot be assigned and triggers an exception. // // ( 0 0 0 0 ) // A1 = ( 1 2 X 0 ) // ( 1 2 3 0 ) // ( 0 0 0 0 ) // row( A1, 1 ) = v; // Assignment throws an exception! 
// Setup of the 3x2 dynamic matrix // // ( 0 0 ) // B = ( 7 0 ) // ( 8 9 ) // DynamicMatrix<int> B( 3UL, 2UL, 0 ); B(1,0) = 7; B(2,0) = 8; B(2,1) = 9; // OK: Assigning B to a submatrix of A2 such that the lower matrix invariant can be preserved // // ( 0 0 0 0 ) // A2 = ( 0 7 0 0 ) // ( 0 8 9 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the lower matrix invariant cannot be // preserved! The elements marked with X cannot be assigned without violating the invariant! // // ( 0 0 0 0 ) // A2 = ( 0 0 X 0 ) // ( 0 0 8 X ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 2UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_triangular_matrices_initialization The Elements of a Dense Triangular Matrix are Always Default Initialized! // // Although this results in a small loss of efficiency during the creation of a dense lower or // upper matrix this initialization is important since otherwise the lower/upper matrix property // of dense lower matrices would not be guaranteed: \code using blaze::DiagonalMatrix; using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::UpperMatrix; using blaze::rowMajor; using blaze::columnMajor; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // 5x5 row-major lower dynamic matrix with default initialized upper matrix LowerMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); // 7x7 column-major upper dynamic matrix with default initialized lower matrix UpperMatrix< DynamicMatrix<int,columnMajor> > C( 7 ); // 3x3 row-major diagonal dynamic matrix with default initialized lower and upper matrix DiagonalMatrix< DynamicMatrix<int,rowMajor> > D( 3 ); \endcode // \n \subsection adaptors_triangular_matrices_storage Dense Triangular Matrices Store All Elements! // // All dense triangular matrices store all \f$ N \times N \f$ elements, including the immutable // elements in the lower or upper part, respectively.
Therefore dense triangular matrices don't // provide any kind of memory reduction! There are two main reasons for this: First, storing also // the zero elements guarantees maximum performance for many algorithms that perform vectorized // operations on the triangular matrices, which is especially true for small dense matrices. // Second, conceptually all triangular adaptors merely restrict the interface to the matrix type // \c MT and do not change the data layout or the underlying matrix type. // // This property matters most for diagonal matrices. In order to achieve the perfect combination // of performance and memory consumption for a diagonal matrix it is recommended to use dense // matrices for small diagonal matrices and sparse matrices for large diagonal matrices: \code // Recommendation 1: use dense matrices for small diagonal matrices using SmallDiagonalMatrix = blaze::DiagonalMatrix< blaze::StaticMatrix<float,3UL,3UL> >; // Recommendation 2: use sparse matrices for large diagonal matrices using LargeDiagonalMatrix = blaze::DiagonalMatrix< blaze::CompressedMatrix<float> >; \endcode // \n \subsection adaptors_triangular_matrices_scaling Unitriangular Matrices Cannot Be Scaled! 
// // Since the diagonal elements of a unitriangular matrix have a fixed value of 1 it is not possible // to self-scale such a matrix: \code using blaze::DynamicMatrix; using blaze::UniLowerMatrix; UniLowerMatrix< DynamicMatrix<int> > A( 4 ); A *= 2; // Compilation error; Scale operation is not available on an unilower matrix A /= 2; // Compilation error; Scale operation is not available on an unilower matrix A.scale( 2 ); // Compilation error; Scale function is not available on an unilower matrix A = A * 2; // Throws an exception; Invalid assignment of non-unilower matrix A = A / 2; // Throws an exception; Invalid assignment of non-unilower matrix \endcode // \n \section adaptors_triangular_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A lower and upper triangular matrix can participate in numerical operations in any way any other // dense or sparse matrix can participate. It can also be combined with any other dense or sparse // vector or matrix. The following code example gives an impression of the use of blaze::LowerMatrix // within arithmetic operations: \code using blaze::LowerMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; DynamicMatrix<double,rowMajor> A( 3, 3 ); CompressedMatrix<double,rowMajor> B( 3, 3 ); LowerMatrix< DynamicMatrix<double,rowMajor> > C( 3 ); LowerMatrix< CompressedMatrix<double,rowMajor> > D( 3 ); LowerMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E; LowerMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major lower matrix (includes runtime check) F = C - D; // Matrix subtraction and assignment to a column-major lower matrix (only compile time check) F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check) C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B (includes runtime 
check) F = C * 2.0; // Scaling of matrix C (only compile time check) E += A - B; // Addition assignment (includes runtime check) F -= C + D; // Subtraction assignment (only compile time check) F *= A * D; // Multiplication assignment (includes runtime check) \endcode // Note that it is possible to assign any kind of matrix to a triangular matrix. In case the // matrix to be assigned does not satisfy the invariants of the triangular matrix at compile // time, a runtime check is performed. Also note that upper triangular, diagonal, unitriangular // and strictly triangular matrix types can be used in the same way, but may pose some additional // restrictions (see the according class documentations). // // // \n \section adaptors_triangular_matrices_block_matrices Triangular Block Matrices // <hr> // // It is also possible to use triangular block matrices: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::UpperMatrix; // Definition of a 5x5 lower block matrix based on DynamicMatrix LowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 ); // Definition of a 7x7 upper block matrix based on CompressedMatrix UpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 ); \endcode // Also in this case the triangular matrix invariant is enforced, i.e. 
it is not possible to // manipulate elements in the upper part (lower triangular matrix) or the lower part (upper // triangular matrix) of the matrix: \code const StaticMatrix<int,3UL,3UL> C{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, -1, 2 } }; A(2,4)(1,1) = -5; // Invalid manipulation of upper matrix element; Results in an exception B.insert( 4, 2, C ); // Invalid insertion of the elements (4,2); Results in an exception \endcode // Note that unitriangular matrices are restricted to numeric element types and therefore cannot // be used for block matrices: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::UniLowerMatrix; using blaze::UniUpperMatrix; // Compilation error: lower unitriangular matrices are restricted to numeric element types UniLowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 ); // Compilation error: upper unitriangular matrices are restricted to numeric element types UniUpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 ); \endcode // For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices. // // // \n \section adaptors_triangular_matrices_performance Performance Considerations // <hr> // // The \b Blaze library tries to exploit the properties of lower and upper triangular matrices // whenever and wherever possible. Therefore using triangular matrices instead of a general // matrices can result in a considerable performance improvement. However, there are also // situations when using a triangular matrix introduces some overhead. The following examples // demonstrate several common situations where triangular matrices can positively or negatively // impact performance. 
// // \n \subsection adaptors_triangular_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the // fact that either the lower or upper part of the matrix contains only default elements and // restrict the algorithm to the non-zero elements. The following example demonstrates this by // means of a dense matrix/dense matrix multiplication with lower triangular matrices: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using blaze::columnMajor; LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // In comparison to a general matrix multiplication, the performance advantage is significant, // especially for large matrices. Therefore it is highly recommended to use the blaze::LowerMatrix // and blaze::UpperMatrix adaptors when a matrix is known to be lower or upper triangular, // respectively. Note however that the performance advantage is most pronounced for dense matrices // and much less so for sparse matrices. // // \n \subsection adaptors_triangular_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar performance improvement can be gained when using a triangular matrix in a matrix/vector // multiplication: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; LowerMatrix< DynamicMatrix<double,rowMajor> > A; DynamicVector<double,columnVector> x, y; // ... Resizing and initialization y = A * x; \endcode // In this example, \b Blaze also exploits the structure of the matrix and approx. halves the // runtime of the multiplication. 
Also in case of matrix/vector multiplications the performance // improvement is most pronounced for dense matrices and much less so for sparse matrices. // // \n \subsection adaptors_triangular_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a triangular matrix on the right-hand side of an assignment (i.e. for // read access), which introduces absolutely no performance penalty, using a triangular matrix // on the left-hand side of an assignment (i.e. for write access) may introduce additional // overhead when it is assigned a general matrix, which is not triangular at compile time: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; LowerMatrix< DynamicMatrix<double> > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the lower matrix; no performance penalty C = A; // Assignment of a lower matrix to another lower matrix; no runtime overhead C = B; // Assignment of a general matrix to a lower matrix; some runtime overhead \endcode // When assigning a general (potentially not lower triangular) matrix to a lower matrix or a // general (potentially not upper triangular) matrix to an upper matrix it is necessary to check // whether the matrix is lower or upper at runtime in order to guarantee the triangular property // of the matrix. In case it turns out to be lower or upper, respectively, it is assigned as // efficiently as possible, if it is not, an exception is thrown. 
In order to prevent this runtime // overhead it is therefore generally advisable to assign lower or upper triangular matrices to // other lower or upper triangular matrices.\n // In this context it is especially noteworthy that the addition, subtraction, and multiplication // of two triangular matrices of the same structure always results in another triangular matrix: \code LowerMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a lower matrix; no runtime overhead C = A - B; // Results in a lower matrix; no runtime overhead C = A * B; // Results in a lower matrix; no runtime overhead \endcode \code UpperMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in an upper matrix; no runtime overhead C = A - B; // Results in an upper matrix; no runtime overhead C = A * B; // Results in an upper matrix; no runtime overhead \endcode // \n Previous: \ref adaptors_hermitian_matrices &nbsp; &nbsp; Next: \ref views */ //************************************************************************************************* //**Views****************************************************************************************** /*!\page views Views // // \tableofcontents // // // \section views_general General Concepts // <hr> // // Views represent parts of a vector or matrix, such as a subvector, a submatrix, or a specific // row, column, or band of a matrix. As such, views act as a reference to specific elements of // a vector or matrix. This reference is valid and can be used in every way as any other vector // or matrix can be used as long as the referenced vector or matrix is not resized or entirely // destroyed. Views also act as an alias to the elements of the vector or matrix: Changes made to the // elements (e.g. modifying values, inserting or erasing elements) via the view are immediately // visible in the vector or matrix and changes made via the vector or matrix are immediately // visible in the view. 
// // It is also possible to create nested views (compound views), such as for instance bands of // submatrices or row selections on column selections. A compound view also acts as reference // to specific elements of the underlying vector or matrix and is valid as long as the underlying, // referenced vector or matrix is not resized or entirely destroyed. // // The \b Blaze library provides the following views on vectors and matrices: // // Vector views: // - \ref views_subvectors // - \ref views_element_selections // // Matrix views: // - \ref views_submatrices // - \ref views_rows // - \ref views_row_selections // - \ref views_columns // - \ref views_column_selections // - \ref views_bands // // // \n \section views_examples Examples \code using blaze::DynamicMatrix; using blaze::StaticVector; // Setup of the 3x5 row-major matrix DynamicMatrix<int> A{ { 1, 0, -2, 3, 0 }, { 0, 2, 5, -1, -1 }, { 1, 0, 0, 2, 1 } }; // Setup of the 2-dimensional row vector StaticVector<int,2UL,rowVector> vec{ 18, 19 }; // Assigning to the elements (1,2) and (1,3) via a subvector of a row // // ( 1 0 -2 3 0 ) // ( 0 2 18 19 -1 ) // ( 1 0 0 2 1 ) // subvector( row( A, 1UL ), 2UL, 2UL ) = vec; // Switching rows 0 and 2 of A // // ( 1 0 0 2 1 ) // ( 0 2 18 19 -1 ) // ( 1 0 -2 3 0 ) // rows<0,2>( A ) = rows<2,0>( A ); // Warning: It is the programmer's responsibility to ensure the view does not outlive // the viewed vector or matrix (dangling reference)! auto row1 = row<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 } } ); \endcode // \n Previous: \ref adaptors_triangular_matrices &nbsp; &nbsp; Next: \ref views_subvectors */ //************************************************************************************************* //**Subvectors************************************************************************************* /*!\page views_subvectors Subvectors // // \tableofcontents // // // Subvectors provide views on a specific part of a dense or sparse vector. 
As such, subvectors // act as a reference to a specific range within a vector. This reference is valid and can be // used in every way any other dense or sparse vector can be used as long as the vector containing // the subvector is not resized or entirely destroyed. The subvector also acts as an alias to the // vector elements in the specified range: Changes made to the elements (e.g. modifying values, // inserting or erasing elements) are immediately visible in the vector and changes made via the // vector are immediately visible in the subvector. // // // \n \section views_subvectors_setup Setup of Subvectors // <hr> // // A view on a dense or sparse subvector can be created very conveniently via the \c subvector() // function. It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/Subvector.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The first parameter specifies the offset of the subvector within the underlying dense or sparse // vector, the second parameter specifies the size of the subvector. The two parameters can be // specified either at compile time or at runtime: \code blaze::DynamicVector<double,blaze::rowVector> x; // ... Resizing and initialization // Create a subvector from index 4 with a size of 12 (i.e. in the range [4..15]) (compile time arguments) auto sv1 = subvector<4UL,12UL>( x ); // Create a subvector from index 8 with a size of 16 (i.e. in the range [8..23]) (runtime arguments) auto sv2 = subvector( x, 8UL, 16UL ); \endcode // The \c subvector() function returns an expression representing the subvector view. The type of // this expression depends on the given subvector arguments, primarily the type of the vector and // the compile time arguments. 
If the type is required, it can be determined via the \c decltype // specifier: \code using VectorType = blaze::DynamicVector<int>; using SubvectorType = decltype( blaze::subvector<4UL,12UL>( std::declval<VectorType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse vector, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. A subvector created // from a row vector can be used as any other row vector, a subvector created from a column vector // can be used as any other column vector. The view can also be used on both sides of an assignment: // The subvector can either be used as an alias to grant write access to a specific subvector of a // vector primitive on the left-hand side of an assignment or to grant read-access to a specific // subvector of a vector primitive or expression on the right-hand side of an assignment. The // following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::rowVector> x; blaze::CompressedVector<double,blaze::rowVector> y; blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Create a subvector from index 0 with a size of 10 (i.e. in the range [0..9]) auto sv = subvector( x, 0UL, 10UL ); // Setting the first ten elements of x to the 2nd row of matrix A sv = row( A, 2UL ); // Setting the second ten elements of x to y subvector( x, 10UL, 10UL ) = y; // Setting the 3rd row of A to a subvector of x row( A, 3UL ) = subvector( x, 3UL, 10UL ); // Setting x to a subvector of the result of the addition between y and the 1st row of A x = subvector( y + row( A, 1UL ), 2UL, 5UL ); \endcode // \warning It is the programmer's responsibility to ensure the subvector does not outlive the // viewed vector: \code // Creating a subvector on a temporary vector; results in a dangling reference! 
auto sv = subvector<1UL,3UL>( DynamicVector<int>{ 1, 2, 3, 4, 5 } ); \endcode // \n \section views_subvectors_element_access Element Access // <hr> // // The elements of a subvector can be directly accessed via the subscript operator: \code blaze::DynamicVector<double,blaze::rowVector> v; // ... Resizing and initialization // Creating an 8-dimensional subvector, starting from index 4 auto sv = subvector( v, 4UL, 8UL ); // Setting the 1st element of the subvector, which corresponds to // the element at index 5 in vector v sv[1] = 2.0; \endcode // The numbering of the subvector elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the specified size of the subvector. Alternatively, the elements of a subvector can // be traversed via iterators. Just as with vectors, in case of non-const subvectors, \c begin() // and \c end() return an iterator, which allows to manipulate the elements, in case of constant // subvectors an iterator to immutable elements is returned: \code blaze::DynamicVector<int,blaze::rowVector> v( 256UL ); // ... Resizing and initialization // Creating a reference to a specific subvector of vector v auto sv = subvector( v, 16UL, 64UL ); // Traversing the elements via iterators to non-const elements for( auto it=sv.begin(); it!=sv.end(); ++it ) { *it = ...; // OK: Write access to the dense subvector value. ... = *it; // OK: Read access to the dense subvector value. } // Traversing the elements via iterators to const elements for( auto it=sv.cbegin(); it!=sv.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense subvector value. } \endcode \code blaze::CompressedVector<int,blaze::rowVector> v( 256UL ); // ... 
Resizing and initialization // Creating a reference to a specific subvector of vector v auto sv = subvector( v, 16UL, 64UL ); // Traversing the elements via iterators to non-const elements for( auto it=sv.begin(); it!=sv.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=sv.cbegin(); it!=sv.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_subvectors_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse subvector can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedVector<double,blaze::rowVector> v( 256UL ); // Non-initialized vector of size 256 auto sv = subvector( v, 10UL, 60UL ); // View on the range [10..69] of v // The subscript operator provides access to all possible elements of the sparse subvector, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse subvector, the element is inserted into the // subvector. sv[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element is // not contained in the subvector it is inserted into the subvector, if it is already contained // in the subvector its value is modified. 
sv.set( 45UL, -1.2 ); // An alternative for inserting elements into the subvector is the insert() function. However, // it inserts the element only in case the element is not already contained in the subvector. sv.insert( 50UL, 3.7 ); // Just as in case of vectors, elements can also be inserted via the append() function. In // case of subvectors, append() also requires that the appended element's index is strictly // larger than the currently largest non-zero index of the subvector and that the subvector's // capacity is large enough to hold the new element. Note however that due to the nature of // a subvector, which may be an alias to the middle of a sparse vector, the append() function // does not work as efficiently for a subvector as it does for a vector. sv.reserve( 10UL ); sv.append( 51UL, -2.1 ); \endcode // \n \section views_subvectors_common_operations Common Operations // <hr> // // A subvector view can be used like any other dense or sparse vector. This means that with // only a few exceptions all \ref vector_operations and \ref arithmetic_operations can be used. // For instance, the current number of elements can be obtained via the \c size() function, the // current capacity via the \c capacity() function, and the number of non-zero elements via the // \c nonZeros() function. However, since subvectors are references to a specific range of a // vector, several operations are not possible, such as resizing and swapping. The following // example shows this by means of a dense subvector view: \code blaze::DynamicVector<int,blaze::rowVector> v( 42UL ); // ... 
Resizing and initialization // Creating a view on the range [5..15] of vector v auto sv = subvector( v, 5UL, 10UL ); sv.size(); // Returns the number of elements in the subvector sv.capacity(); // Returns the capacity of the subvector sv.nonZeros(); // Returns the number of non-zero elements contained in the subvector sv.resize( 84UL ); // Compilation error: Cannot resize a subvector of a vector auto sv2 = subvector( v, 15UL, 10UL ); swap( sv, sv2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_subvectors_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse subvectors can be used in all arithmetic operations that any other dense // or sparse vector can be used in. The following example gives an impression of the use of dense // subvectors within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse subvectors with // fitting element types: \code blaze::DynamicVector<double,blaze::rowVector> d1, d2, d3; blaze::CompressedVector<double,blaze::rowVector> s1, s2; // ... 
Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> A; auto sv( subvector( d1, 0UL, 10UL ) ); // View on the range [0..9] of vector d1 sv = d2; // Dense vector initialization of the range [0..9] subvector( d1, 10UL, 10UL ) = s1; // Sparse vector initialization of the range [10..19] d3 = sv + d2; // Dense vector/dense vector addition s2 = s1 + subvector( d1, 10UL, 10UL ); // Sparse vector/dense vector addition d2 = sv * subvector( d1, 20UL, 10UL ); // Component-wise vector multiplication subvector( d1, 3UL, 4UL ) *= 2.0; // In-place scaling of the range [3..6] d2 = subvector( d1, 7UL, 3UL ) * 2.0; // Scaling of the range [7..9] d2 = 2.0 * subvector( d1, 7UL, 3UL ); // Scaling of the range [7..9] subvector( d1, 0UL , 10UL ) += d2; // Addition assignment subvector( d1, 10UL, 10UL ) -= s2; // Subtraction assignment subvector( d1, 20UL, 10UL ) *= sv; // Multiplication assignment double scalar = subvector( d1, 5UL, 10UL ) * trans( s1 ); // Scalar/dot/inner product between two vectors A = trans( s1 ) * subvector( d1, 4UL, 16UL ); // Outer product between two vectors \endcode // \n \section views_aligned_subvectors Aligned Subvectors // <hr> // // Usually subvectors can be defined anywhere within a vector. They may start at any position and // may have an arbitrary size (only restricted by the size of the underlying vector). However, in // contrast to vectors themselves, which are always properly aligned in memory and therefore can // provide maximum performance, this means that subvectors in general have to be considered to be // unaligned. This can be made explicit by the \c blaze::unaligned flag: \code using blaze::unaligned; blaze::DynamicVector<double,blaze::rowVector> x; // ... 
Resizing and initialization // Identical creations of an unaligned subvector in the range [8..23] auto sv1 = subvector ( x, 8UL, 16UL ); auto sv2 = subvector<unaligned>( x, 8UL, 16UL ); auto sv3 = subvector<8UL,16UL> ( x ); auto sv4 = subvector<unaligned,8UL,16UL>( x ); \endcode // All of these calls to the \c subvector() function are identical. Whether the alignment flag is // explicitly specified or not, it always returns an unaligned subvector. Whereas this may provide // full flexibility in the creation of subvectors, this might result in performance disadvantages // in comparison to vector primitives (even in case the specified subvector could be aligned). // Whereas vector primitives are guaranteed to be properly aligned and therefore provide maximum // performance in all operations, a general view on a vector might not be properly aligned. This // may cause a performance penalty on some platforms and/or for some operations. // // However, it is also possible to create aligned subvectors. Aligned subvectors are identical to // unaligned subvectors in all aspects, except that they may pose additional alignment restrictions // and therefore have less flexibility during creation, but don't suffer from performance penalties // and provide the same performance as the underlying vector. Aligned subvectors are created by // explicitly specifying the \c blaze::aligned flag: \code using blaze::aligned; // Creating an aligned subvector in the range [8..23] auto sv1 = subvector<aligned>( x, 8UL, 16UL ); auto sv2 = subvector<aligned,8UL,16UL>( x ); \endcode // The alignment restrictions refer to system dependent address restrictions for the used element // type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the // first element of the subvector must be aligned. 
The following source code gives some examples // for a double precision dynamic vector, assuming that AVX is available, which packs 4 \c double // values into a SIMD vector: \code using blaze::aligned; blaze::DynamicVector<double,blaze::columnVector> d( 17UL ); // ... Resizing and initialization // OK: Starts at the beginning, i.e. the first element is aligned auto dsv1 = subvector<aligned>( d, 0UL, 13UL ); // OK: Start index is a multiple of 4, i.e. the first element is aligned auto dsv2 = subvector<aligned>( d, 4UL, 7UL ); // OK: The start index is a multiple of 4 and the subvector includes the last element auto dsv3 = subvector<aligned>( d, 8UL, 9UL ); // Error: Start index is not a multiple of 4, i.e. the first element is not aligned auto dsv4 = subvector<aligned>( d, 5UL, 8UL ); \endcode // Note that the discussed alignment restrictions are only valid for aligned dense subvectors. // In contrast, aligned sparse subvectors at this time don't pose any additional restrictions. // Therefore aligned and unaligned sparse subvectors are truly fully identical. Still, in case // the \c blaze::aligned flag is specified during setup, an aligned subvector is created: \code using blaze::aligned; blaze::CompressedVector<double,blaze::rowVector> x; // ... Resizing and initialization // Creating an aligned subvector in the range [8..23] auto sv1 = subvector<aligned>( x, 8UL, 16UL ); auto sv2 = subvector<aligned,8UL,16UL>( x ); \endcode // \n Previous: \ref views &nbsp; &nbsp; Next: \ref views_element_selections */ //************************************************************************************************* //**Element Selections***************************************************************************** /*!\page views_element_selections Element Selections // // \tableofcontents // // // Element selections provide views on arbitrary compositions of elements of dense and sparse // vectors. 
These views act as a reference to the selected elements and represent them as another // dense or sparse vector. This reference is valid and can be used in every way any other dense // or sparse vector can be used as long as the vector containing the elements is not resized or // entirely destroyed. The element selection also acts as an alias to the vector elements in the // specified range: Changes made to the elements (e.g. modifying values, inserting or erasing // elements) are immediately visible in the vector and changes made via the vector are immediately // visible in the elements. // // // \n \section views_element_selections_setup Setup of Element Selections // // An element selection can be created very conveniently via the \c elements() function. It can // be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/Elements.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The indices of the elements to be selected can be specified either at compile time or at runtime // (by means of an initializer list, array or vector): \code blaze::DynamicVector<double,blaze::rowVector> x; // ... 
Resizing and initialization // Selecting the elements 4, 6, 8, and 10 (compile time arguments) auto e1 = elements<4UL,6UL,8UL,10UL>( x ); // Selecting the elements 3, 2, and 1 (runtime arguments via an initializer list) const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL }; auto e2 = elements( x, { 3UL, 2UL, 1UL } ); auto e3 = elements( x, list ); // Selecting the elements 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array) const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL }; auto e4 = elements( x, array ); auto e5 = elements( x, array.data(), array.size() ); // Selecting the element 4 five times (runtime arguments via a std::vector) const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL }; auto e6 = elements( x, vector ); auto e7 = elements( x, vector.data(), vector.size() ); \endcode // Note that it is possible to alias the elements of the underlying vector in any order. Also note // that it is possible to use the same index multiple times. // // Alternatively it is possible to pass a callable such as a lambda or functor that produces the // indices: \code blaze::DynamicVector<double,blaze::rowVector> x{ 0, 1, 2, 3, 4, 5, 6, 7, 8 }; // Selecting all even elements of the vector, i.e. selecting (0,2,4,6,8) auto e1 = elements( x, []( size_t i ){ return i*2UL; }, 5UL ); // Selecting all odd elements of the vector, i.e. selecting (1,3,5,7) auto e2 = elements( x, []( size_t i ){ return i*2UL+1UL; }, 4UL ); // Reversing the elements of the vector, i.e. selecting (8,7,6,5,4,3,2,1,0) auto e3 = elements( x, [max=x.size()-1UL]( size_t i ){ return max-i; }, 9UL ); \endcode // The \c elements() function returns an expression representing the view on the selected elements. // The type of this expression depends on the given arguments, primarily the type of the vector and // the compile time arguments. 
If the type is required, it can be determined via the \c decltype // specifier: \code using VectorType = blaze::DynamicVector<int>; using ElementsType = decltype( blaze::elements<4UL,12UL>( std::declval<VectorType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse vector, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. An element selection // created from a row vector can be used as any other row vector, an element selection created // from a column vector can be used as any other column vector. The view can also be used on both // sides of an assignment: It can either be used as an alias to grant write access to specific // elements of a vector primitive on the left-hand side of an assignment or to grant read-access // to specific elements of a vector primitive or expression on the right-hand side of an assignment. // The following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::rowVector> x; blaze::CompressedVector<double,blaze::rowVector> y; blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization // Selecting the elements 1, 3, 5, and 7 auto e = elements( x, { 1UL, 3UL, 5UL, 7UL } ); // Setting the elements 1, 3, 5, and 7 of x to the 2nd row of matrix A e = row( A, 2UL ); // Setting the elements 2, 4, 6, and 8 of x to y elements( x, { 2UL, 4UL, 6UL, 8UL } ) = y; // Setting the 3rd row of A to the elements 5, 4, 3, and 2 of x row( A, 3UL ) = elements( x, { 5UL, 4UL, 3UL, 2UL } ); // Rotating the result of the addition between y and the 1st row of A x = elements( y + row( A, 1UL ), { 2UL, 3UL, 0UL, 1UL } ); \endcode // Please note that using an element selection, which refers to an index multiple times, on the // left-hand side of an assignment leads to undefined behavior: \code blaze::DynamicVector<int,blaze::rowVector> a{ 1, 2, 3 }; blaze::DynamicVector<int,blaze::rowVector> b{ 1, 2, 3, 4 }; auto e = elements( a, { 1, 1, 1, 1 } ); // Selecting the element 1 four times e = b; // Undefined behavior \endcode // In this example both vectors have the same size, which results in a correct vector assignment, // but the final value of the element at index 1 is unspecified. // // \warning It is the programmer's responsibility to ensure the element selection does not outlive // the viewed vector: \code // Creating an element selection on a temporary vector; results in a dangling reference! auto e = elements<1UL,3UL>( DynamicVector<int>{ 1, 2, 3, 4, 5 } ); \endcode // \n \section views_element_selections_element_access Element Access // // The elements of an element selection can be directly accessed via the subscript operator: \code blaze::DynamicVector<double,blaze::rowVector> v; // ... 
Resizing and initialization // Selecting the elements 2, 4, 6, and 8 auto e = elements( v, { 2UL, 4UL, 6UL, 8UL } ); // Setting the 1st element of the element selection, which corresponds to // the element at index 4 in vector v e[1] = 2.0; \endcode // The numbering of the selected elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of selected elements. Alternatively, the elements of an element selection // can be traversed via iterators. Just as with vectors, in case of non-const element selections, // \c begin() and \c end() return an iterator, which allows to manipulate the elements, in case of // constant element selections an iterator to immutable elements is returned: \code blaze::DynamicVector<int,blaze::rowVector> v( 256UL ); // ... Resizing and initialization // Creating an element selection including specific elements of dense vector v auto e = elements( v, { 0UL, 3UL, 6UL, 9UL, 12UL } ); // Traversing the elements via iterators to non-const elements for( auto it=e.begin(); it!=e.end(); ++it ) { *it = ...; // OK: Write access to the dense vector value. ... = *it; // OK: Read access to the dense vector value. } // Traversing the elements via iterators to const elements for( auto it=e.cbegin(); it!=e.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense vector value. } \endcode \code blaze::CompressedVector<int,blaze::rowVector> v( 256UL ); // ... Resizing and initialization // Creating an element selection including specific elements of sparse vector v auto e = elements( v, { 0UL, 3UL, 6UL, 9UL, 12UL } ); // Traversing the elements via iterators to non-const elements for( auto it=e.begin(); it!=e.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. 
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=e.cbegin(); it!=e.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_element_selections_element_insertion Element Insertion // // Inserting/accessing elements in a sparse element selection can be done by several alternative // functions. The following example demonstrates all options: \code blaze::CompressedVector<double,blaze::rowVector> v( 256UL ); // Non-initialized vector of size 256 std::vector<size_t> indices; // ... Selecting indices of the sparse vector auto e = elements( v, indices ); // The subscript operator provides access to the selected elements of the sparse vector, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse vector, the element is inserted. e[42] = 2.0; // The second operation for inserting elements via the element selection is the set() function. // In case the element is not contained in the vector it is inserted into the vector, if it is // already contained in the vector its value is modified. e.set( 45UL, -1.2 ); // An alternative for inserting elements into the vector is the insert() function. However, it // inserts the element only in case the element is not already contained in the vector. e.insert( 50UL, 3.7 ); // Just as in case of vectors, elements can also be inserted via the append() function. 
In case // of element selections, append() also requires that the appended element's index is strictly // larger than the currently largest non-zero index of the selection and that the selection's // capacity is large enough to hold the new element. Note however that due to the nature of an // element selection, which is an alias to arbitrary elements of a sparse vector, the append() // function does not work as efficiently for an element selection as it does for a vector. e.reserve( 10UL ); e.append( 51UL, -2.1 ); \endcode // \n \section views_element_selections_common_operations Common Operations // // An element selection can be used like any other dense or sparse vector. For instance, the // number of selected elements can be obtained via the \c size() function, the current capacity // via the \c capacity() function, and the number of non-zero elements via the \c nonZeros() // function. However, since element selections are references to a specific range of a vector, // several operations are not possible, such as resizing and swapping. The following example // shows this by means of an element selection on a dense vector: \code blaze::DynamicVector<int,blaze::rowVector> v( 42UL ); // ... Resizing and initialization // Selecting the elements 5 and 10 auto e = elements( v, { 5UL, 10UL } ); e.size(); // Returns the number of elements in the element selection e.capacity(); // Returns the capacity of the element selection e.nonZeros(); // Returns the number of non-zero elements contained in the element selection e.resize( 84UL ); // Compilation error: Cannot resize an element selection auto e2 = elements( v, { 15UL, 10UL } ); swap( e, e2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_element_selections_arithmetic_operations Arithmetic Operations // // Both dense and sparse element selections can be used in all arithmetic operations that any other // dense or sparse vector can be used in. 
The following example gives an impression of the use of // dense element selections within arithmetic operations. All operations (addition, subtraction, // multiplication, scaling, ...) can be performed on all possible combinations of dense and sparse // element selections with fitting element types: \code blaze::DynamicVector<double,blaze::rowVector> d1, d2, d3; blaze::CompressedVector<double,blaze::rowVector> s1, s2; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> A; std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL }; std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL }; std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL }; auto e( elements( d1, indices1 ) ); // Selecting every third element of d1 in the range [0..21] e = d2; // Dense vector assignment to the selected elements elements( d1, indices2 ) = s1; // Sparse vector assignment to the selected elements d3 = e + d2; // Dense vector/dense vector addition s2 = s1 + elements( d1, indices2 ); // Sparse vector/dense vector addition d2 = e * elements( d1, indices3 ); // Component-wise vector multiplication elements( d1, indices2 ) *= 2.0; // In-place scaling of the second selection of elements d2 = elements( d1, indices3 ) * 2.0; // Scaling of the elements in the third selection of elements d2 = 2.0 * elements( d1, indices3 ); // Scaling of the elements in the third selection of elements elements( d1, indices1 ) += d2; // Addition assignment elements( d1, indices2 ) -= s2; // Subtraction assignment elements( d1, indices3 ) *= e; // Multiplication assignment double scalar = elements( d1, indices2 ) * trans( s1 ); // Scalar/dot/inner product between two vectors A = trans( s1 ) * elements( d1, { 3UL, 6UL } ); // Outer product between two vectors \endcode // \n Previous: \ref views_subvectors &nbsp; &nbsp; Next: \ref views_submatrices */ 
//************************************************************************************************* //**Submatrices************************************************************************************ /*!\page views_submatrices Submatrices // // \tableofcontents // // // Submatrices provide views on a specific part of a dense or sparse matrix just as subvectors // provide views on specific parts of vectors. As such, submatrices act as a reference to a // specific block within a matrix. This reference is valid and can be used in every way any // other dense or sparse matrix can be used as long as the matrix containing the submatrix is // not resized or entirely destroyed. The submatrix also acts as an alias to the matrix elements // in the specified block: Changes made to the elements (e.g. modifying values, inserting or // erasing elements) are immediately visible in the matrix and changes made via the matrix are // immediately visible in the submatrix. // // // \n \section views_submatrices_setup Setup of Submatrices // <hr> // // A view on a dense or sparse submatrix can be created very conveniently via the \c submatrix() // function. It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/Submatrix.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The first and second parameter specify the row and column of the first element of the submatrix. // The third and fourth parameter specify the number of rows and columns, respectively. The four // parameters can be specified either at compile time or at runtime: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization // Creating a dense submatrix of size 4x8, starting in row 3 and column 0 (compile time arguments) auto sm1 = submatrix<3UL,0UL,4UL,8UL>( A ); // Creating a dense submatrix of size 8x16, starting in row 0 and column 4 (runtime arguments) auto sm2 = submatrix( A, 0UL, 4UL, 8UL, 16UL ); \endcode // The \c submatrix() function returns an expression representing the submatrix view. The type of // this expression depends on the given submatrix arguments, primarily the type of the matrix and // the compile time arguments. If the type is required, it can be determined via the \c decltype // specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using SubmatrixType = decltype( blaze::submatrix<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. A submatrix created from // a row-major matrix will itself be a row-major matrix, a submatrix created from a column-major // matrix will be a column-major matrix. The view can also be used on both sides of an assignment: // The submatrix can either be used as an alias to grant write access to a specific submatrix // of a matrix primitive on the left-hand side of an assignment or to grant read-access to // a specific submatrix of a matrix primitive or expression on the right-hand side of an // assignment. The following example demonstrates this in detail: \code blaze::DynamicMatrix<double,blaze::columnMajor> A, B; blaze::CompressedMatrix<double,blaze::rowMajor> C; // ... 
Resizing and initialization // Creating a dense submatrix of size 8x4, starting in row 0 and column 2 auto sm = submatrix( A, 0UL, 2UL, 8UL, 4UL ); // Setting the submatrix of A to a 8x4 submatrix of B sm = submatrix( B, 0UL, 0UL, 8UL, 4UL ); // Copying the sparse matrix C into another 8x4 submatrix of A submatrix( A, 8UL, 2UL, 8UL, 4UL ) = C; // Assigning part of the result of a matrix addition to the first submatrix sm = submatrix( B + C, 0UL, 0UL, 8UL, 4UL ); \endcode // \warning It is the programmer's responsibility to ensure the submatrix does not outlive the // viewed matrix: \code // Creating a submatrix on a temporary matrix; results in a dangling reference! auto sm = submatrix<1UL,0UL,2UL,3UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_submatrices_element_access Element Access // <hr> // // The elements of a submatrix can be directly accessed with the function call operator: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a 8x8 submatrix, starting from position (4,4) auto sm = submatrix( A, 4UL, 4UL, 8UL, 8UL ); // Setting the element (0,0) of the submatrix, which corresponds to // the element at position (4,4) in matrix A sm(0,0) = 2.0; \endcode // Alternatively, the elements of a submatrix can be traversed via (const) iterators. Just as // with matrices, in case of non-const submatrices, \c begin() and \c end() return an iterator, // which allows to manipulate the elements, in case of constant submatrices an iterator to // immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a specific submatrix of matrix A auto sm = submatrix( A, 16UL, 16UL, 64UL, 128UL ); // Traversing the elements of the 0th row via iterators to non-const elements for( auto it=sm.begin(0); it!=sm.end(0); ++it ) { *it = ...; // OK: Write access to the dense submatrix value. ... 
= *it; // OK: Read access to the dense submatrix value. } // Traversing the elements of the 1st row via iterators to const elements for( auto it=sm.cbegin(1); it!=sm.cend(1); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense submatrix value. } \endcode \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a specific submatrix of matrix A auto sm = submatrix( A, 16UL, 16UL, 64UL, 128UL ); // Traversing the elements of the 0th row via iterators to non-const elements for( auto it=sm.begin(0); it!=sm.end(0); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements of the 1st row via iterators to const elements for( auto it=sm.cbegin(1); it!=sm.cend(1); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_submatrices_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse submatrix can be done by several alternative functions. 
// The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 256UL, 512UL ); // Non-initialized matrix of size 256x512 auto sm = submatrix( A, 10UL, 10UL, 16UL, 16UL ); // View on a 16x16 submatrix of A // The function call operator provides access to all possible elements of the sparse submatrix, // including the zero elements. In case the function call operator is used to access an element // that is currently not stored in the sparse submatrix, the element is inserted into the // submatrix. sm(2,4) = 2.0; // The second operation for inserting elements is the set() function. In case the element is // not contained in the submatrix it is inserted into the submatrix, if it is already contained // in the submatrix its value is modified. sm.set( 2UL, 5UL, -1.2 ); // An alternative for inserting elements into the submatrix is the insert() function. However, // it inserts the element only in case the element is not already contained in the submatrix. sm.insert( 2UL, 6UL, 3.7 ); // Just as in the case of sparse matrices, elements can also be inserted via the append() // function. In case of submatrices, append() also requires that the appended element's // index is strictly larger than the currently largest non-zero index in the according row // or column of the submatrix and that the according row's or column's capacity is large // enough to hold the new element. Note however that due to the nature of a submatrix, which // may be an alias to the middle of a sparse matrix, the append() function does not work as // efficiently for a submatrix as it does for a matrix. sm.reserve( 2UL, 10UL ); sm.append( 2UL, 10UL, -2.1 ); \endcode // \n \section views_submatrices_common_operations Common Operations // <hr> // // A submatrix view can be used like any other dense or sparse matrix. This means that with only // a few exceptions all \ref matrix_operations and \ref arithmetic_operations can be used. 
For // instance, the current size of the matrix, i.e. the number of rows or columns can be obtained // via the \c rows() and \c columns() functions, the current total capacity via the \c capacity() // function, and the number of non-zero elements via the \c nonZeros() function. However, since // submatrices are views on a specific submatrix of a matrix, several operations are not possible, // such as resizing and swapping: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... Resizing and initialization // Creating a view on an 8x12 submatrix of matrix A auto sm = submatrix( A, 0UL, 0UL, 8UL, 12UL ); sm.rows(); // Returns the number of rows of the submatrix sm.columns(); // Returns the number of columns of the submatrix sm.capacity(); // Returns the capacity of the submatrix sm.nonZeros(); // Returns the number of non-zero elements contained in the submatrix sm.resize( 10UL, 8UL ); // Compilation error: Cannot resize a submatrix of a matrix auto sm2 = submatrix( A, 8UL, 0UL, 12UL, 8UL ); swap( sm, sm2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_submatrices_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse submatrices can be used in all arithmetic operations that any other dense // or sparse matrix can be used in. The following example gives an impression of the use of dense // submatrices within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse matrices with // fitting element types: \code blaze::DynamicMatrix<double,blaze::rowMajor> D1, D2, D3; blaze::CompressedMatrix<double,blaze::rowMajor> S1, S2; blaze::CompressedVector<double,blaze::columnVector> a, b; // ... 
Resizing and initialization auto sm = submatrix( D1, 0UL, 0UL, 8UL, 8UL ); // View on the 8x8 submatrix of matrix D1 // starting from row 0 and column 0 submatrix( D1, 0UL, 8UL, 8UL, 8UL ) = D2; // Dense matrix initialization of the 8x8 submatrix // starting in row 0 and column 8 sm = S1; // Sparse matrix initialization of the second 8x8 submatrix D3 = sm + D2; // Dense matrix/dense matrix addition S2 = S1 - submatrix( D1, 8UL, 0UL, 8UL, 8UL ); // Sparse matrix/dense matrix subtraction D2 = sm * submatrix( D1, 8UL, 8UL, 8UL, 8UL ); // Dense matrix/dense matrix multiplication submatrix( D1, 8UL, 0UL, 8UL, 8UL ) *= 2.0; // In-place scaling of a submatrix of D1 D2 = submatrix( D1, 8UL, 8UL, 8UL, 8UL ) * 2.0; // Scaling of a submatrix of D1 D2 = 2.0 * sm; // Scaling of a submatrix of D1 submatrix( D1, 0UL, 8UL, 8UL, 8UL ) += D2; // Addition assignment submatrix( D1, 8UL, 0UL, 8UL, 8UL ) -= S1; // Subtraction assignment submatrix( D1, 8UL, 8UL, 8UL, 8UL ) *= sm; // Multiplication assignment a = submatrix( D1, 4UL, 4UL, 8UL, 8UL ) * b; // Dense matrix/sparse vector multiplication \endcode // \n \section views_aligned_submatrices Aligned Submatrices // <hr> // // Usually submatrices can be defined anywhere within a matrix. They may start at any position and // may have an arbitrary extension (only restricted by the extension of the underlying matrix). // However, in contrast to matrices themselves, which are always properly aligned in memory and // therefore can provide maximum performance, this means that submatrices in general have to be // considered to be unaligned. This can be made explicit by the \c blaze::unaligned flag: \code using blaze::unaligned; blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization // Identical creations of an unaligned submatrix of size 8x8, starting in row 0 and column 0 auto sm1 = submatrix ( A, 0UL, 0UL, 8UL, 8UL ); auto sm2 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL ); auto sm3 = submatrix<0UL,0UL,8UL,8UL> ( A ); auto sm4 = submatrix<unaligned,0UL,0UL,8UL,8UL>( A ); \endcode // All of these calls to the \c submatrix() function are identical. Whether the alignment flag is // explicitly specified or not, it always returns an unaligned submatrix. Whereas this may provide // full flexibility in the creation of submatrices, this might result in performance disadvantages // in comparison to matrix primitives (even in case the specified submatrix could be aligned). // Whereas matrix primitives are guaranteed to be properly aligned and therefore provide maximum // performance in all operations, a general view on a matrix might not be properly aligned. This // may cause a performance penalty on some platforms and/or for some operations. // // However, it is also possible to create aligned submatrices. Aligned submatrices are identical to // unaligned submatrices in all aspects, except that they may pose additional alignment restrictions // and therefore have less flexibility during creation, but don't suffer from performance penalties // and provide the same performance as the underlying matrix. Aligned submatrices are created by // explicitly specifying the \c blaze::aligned flag: \code using blaze::aligned; // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 auto sv1 = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); auto sv2 = submatrix<aligned,0UL,0UL,8UL,8UL>( A ); \endcode // The alignment restrictions refer to system dependent address restrictions for the used element // type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the // first element of each row/column of the submatrix must be aligned. 
The following source code // gives some examples for a double precision row-major dynamic matrix, assuming that padding is // enabled and that AVX is available, which packs 4 \c double values into a SIMD vector: \code using blaze::aligned; blaze::DynamicMatrix<double,blaze::rowMajor> D( 13UL, 17UL ); // ... Resizing and initialization // OK: Starts at position (0,0), i.e. the first element of each row is aligned (due to padding) auto dsm1 = submatrix<aligned>( D, 0UL, 0UL, 7UL, 11UL ); // OK: First column is a multiple of 4, i.e. the first element of each row is aligned (due to padding) auto dsm2 = submatrix<aligned>( D, 3UL, 12UL, 8UL, 16UL ); // OK: First column is a multiple of 4 and the submatrix includes the last row and column auto dsm3 = submatrix<aligned>( D, 4UL, 0UL, 9UL, 17UL ); // Error: First column is not a multiple of 4, i.e. the first element is not aligned auto dsm4 = submatrix<aligned>( D, 2UL, 3UL, 12UL, 12UL ); \endcode // Note that the discussed alignment restrictions are only valid for aligned dense submatrices. // In contrast, aligned sparse submatrices at this time don't pose any additional restrictions. // Therefore aligned and unaligned sparse submatrices are truly fully identical. Still, in case // the \c blaze::aligned flag is specified during setup, an aligned submatrix is created: \code using blaze::aligned; blaze::CompressedMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 auto sv = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); \endcode // \n \section views_submatrices_on_symmetric_matrices Submatrices on Symmetric Matrices // // Submatrices can also be created on symmetric matrices (see the \c SymmetricMatrix class template): \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of a 16x16 symmetric matrix SymmetricMatrix< DynamicMatrix<int> > A( 16UL ); // Creating a dense submatrix of size 8x12, starting in row 2 and column 4 auto sm = submatrix( A, 2UL, 4UL, 8UL, 12UL ); \endcode // It is important to note, however, that (compound) assignments to such submatrices have a // special restriction: The symmetry of the underlying symmetric matrix must not be broken! // Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry // of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is // thrown: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of two default 4x4 symmetric matrices SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; // OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved! // The elements marked with X cannot be assigned unambiguously! // // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! 
\endcode // \n Previous: \ref views_element_selections &nbsp; &nbsp; Next: \ref views_rows */ //************************************************************************************************* //**Rows******************************************************************************************* /*!\page views_rows Rows // // \tableofcontents // // // Rows provide views on a specific row of a dense or sparse matrix. As such, rows act as a // reference to a specific row. This reference is valid and can be used in every way any other // row vector can be used as long as the matrix containing the row is not resized or entirely // destroyed. The row also acts as an alias to the row elements: Changes made to the elements // (e.g. modifying values, inserting or erasing elements) are immediately visible in the matrix // and changes made via the matrix are immediately visible in the row. // // // \n \section views_rows_setup Setup of Rows // <hr> // // \image html row.png // \image latex row.eps "Row view" width=250pt // // A reference to a dense or sparse row can be created very conveniently via the \c row() function. // It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/Row.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The row index must be in the range from \f$[0..M-1]\f$, where \c M is the total number of rows // of the matrix, and can be specified both at compile time or at runtime: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a reference to the 1st row of matrix A (compile time index) auto row1 = row<1UL>( A ); // Creating a reference to the 2nd row of matrix A (runtime index) auto row2 = row( A, 2UL ); \endcode // The \c row() function returns an expression representing the row view. 
The type of this // expression depends on the given row arguments, primarily the type of the matrix and the compile // time arguments. If the type is required, it can be determined via the \c decltype specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using RowType = decltype( blaze::row<1UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other row vector, i.e. it can be assigned to, it can // be copied from, and it can be used in arithmetic operations. The reference can also be used on // both sides of an assignment: The row can either be used as an alias to grant write access to a // specific row of a matrix primitive on the left-hand side of an assignment or to grant read-access // to a specific row of a matrix primitive or expression on the right-hand side of an assignment. // The following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::rowVector> x; blaze::CompressedVector<double,blaze::rowVector> y; blaze::DynamicMatrix<double,blaze::rowMajor> A, B; blaze::CompressedMatrix<double,blaze::rowMajor> C, D; // ... Resizing and initialization // Setting the 2nd row of matrix A to x auto row2 = row( A, 2UL ); row2 = x; // Setting the 3rd row of matrix B to y row( B, 3UL ) = y; // Setting x to the 4th row of the result of the matrix multiplication x = row( A * B, 4UL ); // Setting y to the 2nd row of the result of the sparse matrix multiplication y = row( C * D, 2UL ); \endcode // \warning It is the programmer's responsibility to ensure the row does not outlive the viewed // matrix: \code // Creating a row on a temporary matrix; results in a dangling reference! auto row1 = row<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_rows_element_access Element Access // <hr> // // The elements of a row can be directly accessed with the subscript operator: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization // Creating a view on the 4th row of matrix A auto row4 = row( A, 4UL ); // Setting the 1st element of the dense row, which corresponds // to the 1st element in the 4th row of matrix A row4[1] = 2.0; \endcode // The numbering of the row elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of columns of the referenced matrix. Alternatively, the elements of a // row can be traversed via iterators. Just as with vectors, in case of non-const rows, \c begin() // and \c end() return an iterator, which allows to manipulate the elements, in case of constant // rows an iterator to immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st row of matrix A auto row31 = row( A, 31UL ); // Traversing the elements via iterators to non-const elements for( auto it=row31.begin(); it!=row31.end(); ++it ) { *it = ...; // OK: Write access to the dense row value. ... = *it; // OK: Read access to the dense row value. } // Traversing the elements via iterators to const elements for( auto it=row31.cbegin(); it!=row31.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense row value. } \endcode \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st row of matrix A auto row31 = row( A, 31UL ); // Traversing the elements via iterators to non-const elements for( auto it=row31.begin(); it!=row31.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... 
= it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=row31.cbegin(); it!=row31.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_rows_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse row can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 10UL, 100UL ); // Non-initialized 10x100 matrix auto row0( row( A, 0UL ) ); // Reference to the 0th row of A // The subscript operator provides access to all possible elements of the sparse row, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse row, the element is inserted into the row. row0[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the row it is inserted into the row, if it is already contained in // the row its value is modified. row0.set( 45UL, -1.2 ); // An alternative for inserting elements into the row is the insert() function. However, // it inserts the element only in case the element is not already contained in the row. row0.insert( 50UL, 3.7 ); // A very efficient way to add new elements to a sparse row is the append() function. // Note that append() requires that the appended element's index is strictly larger than // the currently largest non-zero index of the row and that the row's capacity is large // enough to hold the new element. 
row0.reserve( 10UL ); row0.append( 51UL, -2.1 ); \endcode // \n \section views_rows_common_operations Common Operations // <hr> // // A row view can be used like any other row vector. This means that with only a few exceptions // all \ref vector_operations and \ref arithmetic_operations can be used. For instance, the // current number of elements can be obtained via the \c size() function, the current capacity // via the \c capacity() function, and the number of non-zero elements via the \c nonZeros() // function. However, since rows are references to specific rows of a matrix, several operations // are not possible on views, such as resizing and swapping. The following example shows this by // means of a dense row view: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... Resizing and initialization // Creating a reference to the 2nd row of matrix A auto row2 = row( A, 2UL ); row2.size(); // Returns the number of elements in the row row2.capacity(); // Returns the capacity of the row row2.nonZeros(); // Returns the number of non-zero elements contained in the row row2.resize( 84UL ); // Compilation error: Cannot resize a single row of a matrix auto row3 = row( A, 3UL ); swap( row2, row3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_rows_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse rows can be used in all arithmetic operations that any other dense or // sparse row vector can be used in. The following example gives an impression of the use of // dense rows within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) 
can be performed on all possible combinations of dense and sparse rows with // fitting element types: \code blaze::DynamicVector<double,blaze::rowVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::rowVector> c( 2UL ); c[1] = 3.0; blaze::DynamicMatrix<double,blaze::rowMajor> A( 4UL, 2UL ); // Non-initialized 4x2 matrix auto row0( row( A, 0UL ) ); // Reference to the 0th row of A row0[0] = 0.0; // Manual initialization of the 0th row of A row0[1] = 0.0; row( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st row of A row( A, 2UL ) = a; // Dense vector initialization of the 2nd row of A row( A, 3UL ) = c; // Sparse vector initialization of the 3rd row of A b = row0 + a; // Dense vector/dense vector addition b = c + row( A, 1UL ); // Sparse vector/dense vector addition b = row0 * row( A, 2UL ); // Component-wise vector multiplication row( A, 1UL ) *= 2.0; // In-place scaling of the 1st row b = row( A, 1UL ) * 2.0; // Scaling of the 1st row b = 2.0 * row( A, 1UL ); // Scaling of the 1st row row( A, 2UL ) += a; // Addition assignment row( A, 2UL ) -= c; // Subtraction assignment row( A, 2UL ) *= row( A, 0UL ); // Multiplication assignment double scalar = row( A, 1UL ) * trans( c ); // Scalar/dot/inner product between two vectors A = trans( c ) * row( A, 1UL ); // Outer product between two vectors \endcode // \n \section views_rows_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order // <hr> // // Especially noteworthy is that row views can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly // and the interface of a column-major matrix only allows to traverse a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. For // instance: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 64UL, 32UL ); // ... 
Resizing and initialization // Creating a reference to the 1st row of a column-major matrix A auto row1 = row( A, 1UL ); for( auto it=row1.begin(); it!=row1.end(); ++it ) { // ... } \endcode // However, please note that creating a row view on a matrix stored in a column-major fashion // can result in a considerable performance decrease in comparison to a row view on a matrix // with row-major storage format. This is due to the non-contiguous storage of the matrix // elements. Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two column-major matrices blaze::DynamicMatrix<double,blaze::columnMajor> A( 128UL, 128UL ); blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th row of the multiplication between A and B ... blaze::DynamicVector<double,blaze::rowVector> x = row( A * B, 15UL ); // ... is essentially the same as the following computation, which multiplies // the 15th row of the column-major matrix A with B. blaze::DynamicVector<double,blaze::rowVector> x = row( A, 15UL ) * B; \endcode // Although \b Blaze performs the resulting vector/matrix multiplication as efficiently as possible // using a row-major storage order for matrix \c A would result in a more efficient evaluation. // // \n Previous: \ref views_submatrices &nbsp; &nbsp; Next: \ref views_row_selections */ //************************************************************************************************* //**Row Selections********************************************************************************* /*!\page views_row_selections Row Selections // // \tableofcontents // // // Row selections provide views on arbitrary compositions of rows of dense and sparse matrices. // These views act as a reference to the selected rows and represent them as another dense or // sparse matrix. 
This reference is valid and can be used in every way any other dense or sparse // matrix can be used as long as the matrix containing the rows is not resized or entirely // destroyed. The row selection also acts as an alias to the matrix elements in the specified // range: Changes made to the rows (e.g. modifying values, inserting or erasing elements) are // immediately visible in the matrix and changes made via the matrix are immediately visible // in the rows. // // // \n \section views_row_selections_setup Setup of Row Selections // // A row selection can be created very conveniently via the \c rows() function. It can be included // via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/Rows.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The indices of the rows to be selected can be specified either at compile time or at runtime // (by means of an initializer list, array or vector): \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Selecting the rows 4, 6, 8, and 10 (compile time arguments) auto rs1 = rows<4UL,6UL,8UL,10UL>( A ); // Selecting the rows 3, 2, and 1 (runtime arguments via an initializer list) const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL }; auto rs2 = rows( A, { 3UL, 2UL, 1UL } ); auto rs3 = rows( A, list ); // Selecting the rows 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array) const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL }; auto rs4 = rows( A, array ); auto rs5 = rows( A, array.data(), array.size() ); // Selecting the row 4 five times (runtime arguments via a std::vector) const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL }; auto rs6 = rows( A, vector ); auto rs7 = rows( A, vector.data(), vector.size() ); \endcode // Note that it is possible to alias the rows of the underlying matrix in any order. 
Also note // that it is possible to use the same index multiple times. // // Alternatively it is possible to pass a callable such as a lambda or functor that produces the // indices: \code blaze::DynamicMatrix<double,blaze::rowMajor> A( 9UL, 18UL ); // Selecting all even rows of the matrix, i.e. selecting the rows 0, 2, 4, 6, and 8 auto rs1 = rows( A, []( size_t i ){ return i*2UL; }, 5UL ); // Selecting all odd rows of the matrix, i.e. selecting the rows 1, 3, 5, and 7 auto rs2 = rows( A, []( size_t i ){ return i*2UL+1UL; }, 4UL ); // Reversing the rows of the matrix, i.e. selecting the rows 8, 7, 6, 5, 4, 3, 2, 1, and 0 auto rs3 = rows( A, [max=A.rows()-1UL]( size_t i ){ return max-i; }, 9UL ); \endcode // The \c rows() function returns an expression representing the view on the selected rows. The // type of this expression depends on the given arguments, primarily the type of the matrix and // the compile time arguments. If the type is required, it can be determined via the \c decltype // specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using RowsType = decltype( blaze::rows<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. Note, however, that a // row selection will always be treated as a row-major matrix, regardless of the storage order of // the matrix containing the rows. The view can also be used on both sides of an assignment: It // can either be used as an alias to grant write access to specific rows of a matrix primitive // on the left-hand side of an assignment or to grant read-access to specific rows of a matrix // primitive or expression on the right-hand side of an assignment. 
The following example // demonstrates this in detail: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; blaze::DynamicMatrix<double,blaze::columnMajor> B; blaze::CompressedMatrix<double,blaze::rowMajor> C; // ... Resizing and initialization // Selecting the rows 1, 3, 5, and 7 of A auto rs = rows( A, { 1UL, 3UL, 5UL, 7UL } ); // Setting rows 1, 3, 5, and 7 of A to row 4 of B rs = rows( B, { 4UL, 4UL, 4UL, 4UL } ); // Setting the rows 2, 4, 6, and 8 of A to C rows( A, { 2UL, 4UL, 6UL, 8UL } ) = C; // Setting the first 4 rows of A to the rows 5, 4, 3, and 2 of C submatrix( A, 0UL, 0UL, 4UL, A.columns() ) = rows( C, { 5UL, 4UL, 3UL, 2UL } ); // Rotating the result of the addition between rows 1, 3, 5, and 7 of A and C B = rows( rs + C, { 2UL, 3UL, 0UL, 1UL } ); \endcode // \warning It is the programmer's responsibility to ensure the row selection does not outlive the // viewed matrix: \code // Creating a row selection on a temporary matrix; results in a dangling reference! auto rs = rows<2UL,0UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_row_selections_element_access Element Access // // The elements of a row selection can be directly accessed via the function call operator: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a view on the first four rows of A in reverse order auto rs = rows( A, { 3UL, 2UL, 1UL, 0UL } ); // Setting the element (0,0) of the row selection, which corresponds // to the element at position (3,0) in matrix A rs(0,0) = 2.0; \endcode // Alternatively, the elements of a row selection can be traversed via (const) iterators. Just as // with matrices, in case of non-const row selection, \c begin() and \c end() return an iterator, // which allows to manipulate the elements, in case of constant row selection an iterator to // immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... 
Resizing and initialization // Creating a reference to a selection of rows of matrix A auto rs = rows( A, { 16UL, 32UL, 64UL, 128UL } ); // Traversing the elements of the 0th row via iterators to non-const elements for( auto it=rs.begin(0); it!=rs.end(0); ++it ) { *it = ...; // OK: Write access to the dense value. ... = *it; // OK: Read access to the dense value. } // Traversing the elements of the 1st row via iterators to const elements for( auto it=rs.cbegin(1); it!=rs.cend(1); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense value. } \endcode \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a selection of rows of matrix A auto rs = rows( A, { 16UL, 32UL, 64UL, 128UL } ); // Traversing the elements of the 0th row via iterators to non-const elements for( auto it=rs.begin(0); it!=rs.end(0); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements of the 1st row via iterators to const elements for( auto it=rs.cbegin(1); it!=rs.cend(1); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_row_selections_element_insertion Element Insertion // // Inserting/accessing elements in a sparse row selection can be done by several alternative // functions. 
The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 256UL, 512UL ); // Non-initialized matrix of size 256x512 auto rs = rows( A, { 10UL, 20UL, 30UL, 40UL } ); // View on the rows 10, 20, 30, and 40 of A // The function call operator provides access to all possible elements of the sparse row // selection, including the zero elements. In case the function call operator is used to // access an element that is currently not stored in the sparse row selection, the element // is inserted into the row selection. rs(2,4) = 2.0; // The second operation for inserting elements is the set() function. In case the element is // not contained in the row selection it is inserted into the row selection, if it is already // contained in the row selection its value is modified. rs.set( 2UL, 5UL, -1.2 ); // An alternative for inserting elements into the row selection is the insert() function. // However, it inserts the element only in case the element is not already contained in the // row selection. rs.insert( 2UL, 6UL, 3.7 ); // Just as in the case of sparse matrices, elements can also be inserted via the append() // function. In case of row selections, append() also requires that the appended element's // index is strictly larger than the currently largest non-zero index in the according row // of the row selection and that the according row's capacity is large enough to hold the new // element. Note however that due to the nature of a row selection, which may be an alias to // an arbitrary collection of rows, the append() function does not work as efficiently for // a row selection as it does for a matrix. rs.reserve( 2UL, 10UL ); rs.append( 2UL, 10UL, -2.1 ); \endcode // \n \section views_row_selections_common_operations Common Operations // // A view on specific rows of a matrix can be used like any other dense or sparse matrix. For // instance, the current size of the matrix, i.e. 
the number of rows or columns can be obtained // via the \c rows() and \c columns() functions, the current total capacity via the \c capacity() // function, and the number of non-zero elements via the \c nonZeros() function. However, since // row selections are views on specific rows of a matrix, several operations are not possible, // such as resizing and swapping: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... Resizing and initialization // Creating a view on the rows 8, 16, 24, and 32 of matrix A auto rs = rows( A, { 8UL, 16UL, 24UL, 32UL } ); rs.rows(); // Returns the number of rows of the row selection rs.columns(); // Returns the number of columns of the row selection rs.capacity(); // Returns the capacity of the row selection rs.nonZeros(); // Returns the number of non-zero elements contained in the row selection rs.resize( 10UL, 8UL ); // Compilation error: Cannot resize a row selection auto rs2 = rows( A, { 9UL, 17UL, 25UL, 33UL } ); swap( rs, rs2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_row_selections_arithmetic_operations Arithmetic Operations // // Both dense and sparse row selections can be used in all arithmetic operations that any other // dense or sparse matrix can be used in. The following example gives an impression of the use // of dense row selections within arithmetic operations. All operations (addition, subtraction, // multiplication, scaling, ...) can be performed on all possible combinations of dense and // sparse matrices with fitting element types: \code blaze::DynamicMatrix<double,blaze::rowMajor> D1, D2, D3; blaze::CompressedMatrix<double,blaze::rowMajor> S1, S2; blaze::CompressedVector<double,blaze::columnVector> a, b; // ... 
Resizing and initialization std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL }; std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL }; std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL }; auto rs = rows( D1, indices1 ); // Selecting every third row of D1 in the range [0..21] rs = D2; // Dense matrix assignment to the selected rows rows( D1, indices2 ) = S1; // Sparse matrix assignment to the selected rows D3 = rs + D2; // Dense matrix/dense matrix addition S2 = S1 - rows( D1, indices2 ); // Sparse matrix/dense matrix subtraction D2 = rs % rows( D1, indices3 ); // Dense matrix/dense matrix Schur product D2 = rows( D1, indices2 ) * D1; // Dense matrix/dense matrix multiplication rows( D1, indices2 ) *= 2.0; // In-place scaling of the second selection of rows D2 = rows( D1, indices3 ) * 2.0; // Scaling of the elements in the third selection of rows D2 = 2.0 * rows( D1, indices3 ); // Scaling of the elements in the third selection of rows rows( D1, indices1 ) += D2; // Addition assignment rows( D1, indices2 ) -= S1; // Subtraction assignment rows( D1, indices3 ) %= rs; // Schur product assignment a = rows( D1, indices1 ) * b; // Dense matrix/sparse vector multiplication \endcode // \n \section views_row_selections_on_column_major_matrix Row Selections on Column-Major Matrices // // Especially noteworthy is that row selections can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly // and the interface of a column-major matrix only allows to traverse a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. For // instance: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 64UL, 32UL ); // ... 
Resizing and initialization // Creating a reference to the 1st and 3rd row of a column-major matrix A auto rs = rows( A, { 1UL, 3UL } ); // Traversing row 0 of the selection, which corresponds to the 1st row of matrix A for( auto it=rs.begin( 0UL ); it!=rs.end( 0UL ); ++it ) { // ... } \endcode // However, please note that creating a row selection on a matrix stored in a column-major fashion // can result in a considerable performance decrease in comparison to a row selection on a matrix // with row-major storage format. This is due to the non-contiguous storage of the matrix elements. // Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two column-major matrices blaze::DynamicMatrix<double,blaze::columnMajor> A( 128UL, 128UL ); blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th, 30th, and 45th row of the multiplication between A and B ... blaze::DynamicMatrix<double,blaze::rowMajor> x = rows( A * B, { 15UL, 30UL, 45UL } ); // ... is essentially the same as the following computation, which multiplies // the 15th, 30th, and 45th row of the column-major matrix A with B. blaze::DynamicMatrix<double,blaze::rowMajor> x = rows( A, { 15UL, 30UL, 45UL } ) * B; \endcode // Although \b Blaze performs the resulting matrix/matrix multiplication as efficiently as possible // using a row-major storage order for matrix \c A would result in a more efficient evaluation. // // \n Previous: \ref views_rows &nbsp; &nbsp; Next: \ref views_columns */ //************************************************************************************************* //**Columns**************************************************************************************** /*!\page views_columns Columns // // \tableofcontents // // // Just as rows provide a view on a specific row of a matrix, columns provide views on a specific // column of a dense or sparse matrix. 
As such, columns act as a reference to a specific column. // This reference is valid and can be used in every way any other column vector can be used as long // as the matrix containing the column is not resized or entirely destroyed. Changes made to the // elements (e.g. modifying values, inserting or erasing elements) are immediately visible in the // matrix and changes made via the matrix are immediately visible in the column. // // // \n \section views_colums_setup Setup of Columns // <hr> // // \image html column.png // \image latex column.eps "Column view" width=250pt // // A reference to a dense or sparse column can be created very conveniently via the \c column() // function. It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/Column.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The column index must be in the range from \f$[0..N-1]\f$, where \c N is the total number of // columns of the matrix, and can be specified either at compile time or at runtime: \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization // Creating a reference to the 1st column of matrix A (compile time index) auto col1 = column<1UL>( A ); // Creating a reference to the 2nd column of matrix A (runtime index) auto col2 = column( A, 2UL ); \endcode // The \c column() function returns an expression representing the column view. The type of this // expression depends on the given column arguments, primarily the type of the matrix and the // compile time arguments. If the type is required, it can be determined via the \c decltype // specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using ColumnType = decltype( blaze::column<1UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other column vector, i.e. 
it can be assigned to, it // can be copied from, and it can be used in arithmetic operations. The reference can also be used // on both sides of an assignment: The column can either be used as an alias to grant write access // to a specific column of a matrix primitive on the left-hand side of an assignment or to grant // read-access to a specific column of a matrix primitive or expression on the right-hand side // of an assignment. The following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::columnVector> x; blaze::CompressedVector<double,blaze::columnVector> y; blaze::DynamicMatrix<double,blaze::columnMajor> A, B; blaze::CompressedMatrix<double,blaze::columnMajor> C, D; // ... Resizing and initialization // Setting the 1st column of matrix A to x auto col1 = column( A, 1UL ); col1 = x; // Setting the 4th column of matrix B to y column( B, 4UL ) = y; // Setting x to the 2nd column of the result of the matrix multiplication x = column( A * B, 2UL ); // Setting y to the 2nd column of the result of the sparse matrix multiplication y = column( C * D, 2UL ); \endcode // \warning It is the programmer's responsibility to ensure the column does not outlive the // viewed matrix: \code // Creating a column on a temporary matrix; results in a dangling reference! auto col1 = column<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_columns_element_access Element Access // <hr> // // The elements of a column can be directly accessed with the subscript operator. \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... 
Resizing and initialization // Creating a view on the 4th column of matrix A auto col4 = column( A, 4UL ); // Setting the 1st element of the dense column, which corresponds // to the 1st element in the 4th column of matrix A col4[1] = 2.0; \endcode // The numbering of the column elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of rows of the referenced matrix. Alternatively, the elements of a column // can be traversed via iterators. Just as with vectors, in case of non-const columns, \c begin() // and \c end() return an iterator, which allows to manipulate the elements, in case of constant // columns an iterator to immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st column of matrix A auto col31 = column( A, 31UL ); // Traversing the elements via iterators to non-const elements for( auto it=col31.begin(); it!=col31.end(); ++it ) { *it = ...; // OK; Write access to the dense column value ... = *it; // OK: Read access to the dense column value. } // Traversing the elements via iterators to const elements for( auto it=col31.cbegin(); it!=col31.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense column value. } \endcode \code blaze::CompressedMatrix<int,blaze::columnMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st column of matrix A auto col31 = column( A, 31UL ); // Traversing the elements via iterators to non-const elements for( auto it=col31.begin(); it!=col31.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... 
= it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=col31.cbegin(); it!=col31.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_columns_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse column can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::columnMajor> A( 100UL, 10UL ); // Non-initialized 100x10 matrix auto col0( column( A, 0UL ) ); // Reference to the 0th column of A // The subscript operator provides access to all possible elements of the sparse column, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse column, the element is inserted into the column. col0[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the column it is inserted into the column, if it is already contained // in the column its value is modified. col0.set( 45UL, -1.2 ); // An alternative for inserting elements into the column is the insert() function. However, // it inserts the element only in case the element is not already contained in the column. col0.insert( 50UL, 3.7 ); // A very efficient way to add new elements to a sparse column is the append() function. // Note that append() requires that the appended element's index is strictly larger than // the currently largest non-zero index of the column and that the column's capacity is // large enough to hold the new element. 
col0.reserve( 10UL ); col0.append( 51UL, -2.1 ); \endcode // \n \section views_columns_common_operations Common Operations // <hr> // // A column view can be used like any other column vector. This means that with only a few // exceptions all \ref vector_operations and \ref arithmetic_operations can be used. For instance, // the current number of elements can be obtained via the \c size() function, the current capacity // via the \c capacity() function, and the number of non-zero elements via the \c nonZeros() // function. However, since columns are references to specific columns of a matrix, several // operations are not possible on views, such as resizing and swapping. The following example // shows this by means of a dense column view: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 42UL, 42UL ); // ... Resizing and initialization // Creating a reference to the 2nd column of matrix A auto col2 = column( A, 2UL ); col2.size(); // Returns the number of elements in the column col2.capacity(); // Returns the capacity of the column col2.nonZeros(); // Returns the number of non-zero elements contained in the column col2.resize( 84UL ); // Compilation error: Cannot resize a single column of a matrix auto col3 = column( A, 3UL ); swap( col2, col3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_columns_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse columns can be used in all arithmetic operations that any other dense or // sparse column vector can be used in. The following example gives an impression of the use of // dense columns within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) 
can be performed on all possible combinations of dense and sparse columns with // fitting element types: \code blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::columnVector> c( 2UL ); c[1] = 3.0; blaze::DynamicMatrix<double,blaze::columnMajor> A( 2UL, 4UL ); // Non-initialized 2x4 matrix auto col0( column( A, 0UL ) ); // Reference to the 0th column of A col0[0] = 0.0; // Manual initialization of the 0th column of A col0[1] = 0.0; column( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st column of A column( A, 2UL ) = a; // Dense vector initialization of the 2nd column of A column( A, 3UL ) = c; // Sparse vector initialization of the 3rd column of A b = col0 + a; // Dense vector/dense vector addition b = c + column( A, 1UL ); // Sparse vector/dense vector addition b = col0 * column( A, 2UL ); // Component-wise vector multiplication column( A, 1UL ) *= 2.0; // In-place scaling of the 1st column b = column( A, 1UL ) * 2.0; // Scaling of the 1st column b = 2.0 * column( A, 1UL ); // Scaling of the 1st column column( A, 2UL ) += a; // Addition assignment column( A, 2UL ) -= c; // Subtraction assignment column( A, 2UL ) *= column( A, 0UL ); // Multiplication assignment double scalar = trans( c ) * column( A, 1UL ); // Scalar/dot/inner product between two vectors A = column( A, 1UL ) * trans( c ); // Outer product between two vectors \endcode // \n \section views_columns_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order // <hr> // // Especially noteworthy is that column views can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly // and the interface of a column-major matrix only allows to traverse a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. 
For // instance: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 64UL, 32UL ); // ... Resizing and initialization // Creating a reference to the 1st column of a row-major matrix A auto col1 = column( A, 1UL ); for( auto it=col1.begin(); it!=col1.end(); ++it ) { // ... } \endcode // However, please note that creating a column view on a matrix stored in a row-major fashion // can result in a considerable performance decrease in comparison to a column view on a matrix // with column-major storage format. This is due to the non-contiguous storage of the matrix // elements. Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two row-major matrices blaze::DynamicMatrix<double,blaze::rowMajor> A( 128UL, 128UL ); blaze::DynamicMatrix<double,blaze::rowMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th column of the multiplication between A and B ... blaze::DynamicVector<double,blaze::columnVector> x = column( A * B, 15UL ); // ... is essentially the same as the following computation, which multiplies // A with the 15th column of the row-major matrix B. blaze::DynamicVector<double,blaze::columnVector> x = A * column( B, 15UL ); \endcode // Although \b Blaze performs the resulting matrix/vector multiplication as efficiently as possible, // using a column-major storage order for matrix \c B would result in a more efficient evaluation. // // \n Previous: \ref views_row_selections &nbsp; &nbsp; Next: \ref views_column_selections */ //************************************************************************************************* //**Column Selections****************************************************************************** /*!\page views_column_selections Column Selections // // \tableofcontents // // // Column selections provide views on arbitrary compositions of columns of dense and sparse // matrices. 
These views act as a reference to the selected columns and represent them as another // dense or sparse matrix. This reference is valid and can be used in every way any other dense // or sparse matrix can be used as long as the matrix containing the columns is not resized or // entirely destroyed. The column selection also acts as an alias to the matrix elements in the // specified range: Changes made to the columns (e.g. modifying values, inserting or erasing // elements) are immediately visible in the matrix and changes made via the matrix are immediately // visible in the columns. // // // \n \section views_column_selections_setup Setup of Column Selections // // A column selection can be created very conveniently via the \c columns() function. It can be // included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/Columns.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The indices of the columns to be selected can be specified either at compile time or at runtime // (by means of an initializer list, array or vector): \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... 
Resizing and initialization // Selecting the columns 4, 6, 8, and 10 (compile time arguments) auto cs1 = columns<4UL,6UL,8UL,10UL>( A ); // Selecting the columns 3, 2, and 1 (runtime arguments via an initializer list) const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL }; auto cs2 = columns( A, { 3UL, 2UL, 1UL } ); auto cs3 = columns( A, list ); // Selecting the columns 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array) const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL }; auto cs4 = columns( A, array ); auto cs5 = columns( A, array.data(), array.size() ); // Selecting the column 4 five times (runtime arguments via a std::vector) const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL }; auto cs6 = columns( A, vector ); auto cs7 = columns( A, vector.data(), vector.size() ); \endcode // Note that it is possible to alias the columns of the underlying matrix in any order. Also note // that it is possible to use the same index multiple times. // // Alternatively it is possible to pass a callable such as a lambda or functor that produces the // indices: \code blaze::DynamicMatrix<double,blaze::columnMajor> A( 18UL, 9UL ); // Selecting all even columns of the matrix, i.e. selecting the columns 0, 2, 4, 6, and 8 auto cs1 = columns( A, []( size_t i ){ return i*2UL; }, 5UL ); // Selecting all odd columns of the matrix, i.e. selecting the columns 1, 3, 5, and 7 auto cs2 = columns( A, []( size_t i ){ return i*2UL+1UL; }, 4UL ); // Reversing the columns of the matrix, i.e. selecting the columns 8, 7, 6, 5, 4, 3, 2, 1, and 0 auto cs3 = columns( A, [max=A.columns()-1UL]( size_t i ){ return max-i; }, 9UL ); \endcode // The \c columns() function returns an expression representing the view on the selected columns. // The type of this expression depends on the given arguments, primarily the type of the matrix // and the compile time arguments. 
If the type is required, it can be determined via the \c decltype // specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using ColumnsType = decltype( blaze::columns<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. Note, however, that a // column selection will always be treated as a column-major matrix, regardless of the storage // order of the matrix containing the columns. The view can also be used on both sides of an // assignment: It can either be used as an alias to grant write access to specific columns of a // matrix primitive on the left-hand side of an assignment or to grant read-access to specific // columns of a matrix primitive or expression on the right-hand side of an assignment. The // following example demonstrates this in detail: \code blaze::DynamicMatrix<double,blaze::columnMajor> A; blaze::DynamicMatrix<double,blaze::rowMajor> B; blaze::CompressedMatrix<double,blaze::columnMajor> C; // ... Resizing and initialization // Selecting the columns 1, 3, 5, and 7 of A auto cs = columns( A, { 1UL, 3UL, 5UL, 7UL } ); // Setting columns 1, 3, 5, and 7 of A to column 4 of B cs = columns( B, { 4UL, 4UL, 4UL, 4UL } ); // Setting the columns 2, 4, 6, and 8 of A to C columns( A, { 2UL, 4UL, 6UL, 8UL } ) = C; // Setting the first 4 columns of A to the columns 5, 4, 3, and 2 of C submatrix( A, 0UL, 0UL, A.rows(), 4UL ) = columns( C, { 5UL, 4UL, 3UL, 2UL } ); // Rotating the result of the addition between columns 1, 3, 5, and 7 of A and C B = columns( cs + C, { 2UL, 3UL, 0UL, 1UL } ); \endcode // \warning It is the programmer's responsibility to ensure the column selection does not outlive // the viewed matrix: \code // Creating a column selection on a temporary matrix; results in a dangling reference! 
auto cs = columns<2UL,0UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_column_selections_element_access Element Access // // The elements of a column selection can be directly accessed via the function call operator: \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization // Creating a view on the first four columns of A in reverse order auto cs = columns( A, { 3UL, 2UL, 1UL, 0UL } ); // Setting the element (0,0) of the column selection, which corresponds // to the element at position (0,3) in matrix A cs(0,0) = 2.0; \endcode // Alternatively, the elements of a column selection can be traversed via (const) iterators. // Just as with matrices, in case of non-const column selection, \c begin() and \c end() return // an iterator, which allows to manipuate the elements, in case of constant column selection an // iterator to immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 512UL, 256UL ); // ... Resizing and initialization // Creating a reference to a selection of columns of matrix A auto cs = columns( A, { 16UL, 32UL, 64UL, 128UL } ); // Traversing the elements of the 0th column via iterators to non-const elements for( auto it=cs.begin(0); it!=cs.end(0); ++it ) { *it = ...; // OK: Write access to the dense value. ... = *it; // OK: Read access to the dense value. } // Traversing the elements of the 1st column via iterators to const elements for( auto it=cs.cbegin(1); it!=cs.cend(1); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense value. } \endcode \code blaze::CompressedMatrix<int,blaze::columnMajor> A( 512UL, 256UL ); // ... 
Resizing and initialization // Creating a reference to a selection of columns of matrix A auto cs = columns( A, { 16UL, 32UL, 64UL, 128UL } ); // Traversing the elements of the 0th column via iterators to non-const elements for( auto it=cs.begin(0); it!=cs.end(0); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements of the 1st column via iterators to const elements for( auto it=cs.cbegin(1); it!=cs.cend(1); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_column_selections_element_insertion Element Insertion // // Inserting/accessing elements in a sparse column selection can be done by several alternative // functions. The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::columnMajor> A( 512UL, 256UL ); // Non-initialized matrix of size 512x256 auto cs = columns( A, { 10UL, 20UL, 30UL, 40UL } ); // View on the columns 10, 20, 30, and 40 of A // The function call operator provides access to all possible elements of the sparse column // selection, including the zero elements. In case the function call operator is used to // access an element that is currently not stored in the sparse column selection, the element // is inserted into the column selection. cs(2,4) = 2.0; // The second operation for inserting elements is the set() function. 
In case the element is // not contained in the column selection it is inserted into the column selection, if it is // already contained in the column selection its value is modified. cs.set( 2UL, 5UL, -1.2 ); // An alternative for inserting elements into the column selection is the insert() function. // However, it inserts the element only in case the element is not already contained in the // column selection. cs.insert( 2UL, 6UL, 3.7 ); // Just as in the case of sparse matrices, elements can also be inserted via the append() // function. In case of column selections, append() also requires that the appended element's // index is strictly larger than the currently largest non-zero index in the according column // of the column selection and that the according column's capacity is large enough to hold the // new element. Note however that due to the nature of a column selection, which may be an alias // to an arbitrary collection of columns, the append() function does not work as efficiently // for a column selection as it does for a matrix. cs.reserve( 2UL, 10UL ); cs.append( 2UL, 10UL, -2.1 ); \endcode // \n \section views_column_selections_common_operations Common Operations // // A view on specific columns of a matrix can be used like any other dense or sparse matrix. For // instance, the current size of the matrix, i.e. the number of rows or columns can be obtained // via the \c rows() and \c columns() functions, the current total capacity via the \c capacity() // function, and the number of non-zero elements via the \c nonZeros() function. However, since // column selections are views on specific columns of a matrix, several operations are not possible, // such as resizing and swapping: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 42UL, 42UL ); // ... 
Resizing and initialization // Creating a view on the columns 8, 16, 24, and 32 of matrix A auto cs = columns( A, { 8UL, 16UL, 24UL, 32UL } ); cs.rows(); // Returns the number of rows of the column selection cs.columns(); // Returns the number of columns of the column selection cs.capacity(); // Returns the capacity of the column selection cs.nonZeros(); // Returns the number of non-zero elements contained in the column selection cs.resize( 10UL, 8UL ); // Compilation error: Cannot resize a column selection auto cs2 = columns( A, 9UL, 17UL, 25UL, 33UL ); swap( cs, cs2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_column_selections_arithmetic_operations Arithmetic Operations // // Both dense and sparse column selections can be used in all arithmetic operations that any other // dense or sparse matrix can be used in. The following example gives an impression of the use of // dense column selctions within arithmetic operations. All operations (addition, subtraction, // multiplication, scaling, ...) can be performed on all possible combinations of dense and // sparse matrices with fitting element types: \code blaze::DynamicMatrix<double,blaze::columnMajor> D1, D2, D3; blaze::CompressedMatrix<double,blaze::columnMajor> S1, S2; blaze::CompressedVector<double,blaze::columnVector> a, b; // ... 
Resizing and initialization std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL }; std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL }; std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL }; auto cs = columns( D1, indices1 ); // Selecting the every third column of D1 in the range [0..21] cs = D2; // Dense matrix assignment to the selected columns columns( D1, indices2 ) = S1; // Sparse matrix assignment to the selected columns D3 = cs + D2; // Dense matrix/dense matrix addition S2 = S1 - columns( D1, indices2 ); // Sparse matrix/dense matrix subtraction D2 = cs % columns( D1, indices3 ); // Dense matrix/dense matrix Schur product D2 = columns( D1, indices2 ) * D1; // Dense matrix/dense matrix multiplication columns( D1, indices2 ) *= 2.0; // In-place scaling of the second selection of columns D2 = columns( D1, indices3 ) * 2.0; // Scaling of the elements in the third selection of columns D2 = 2.0 * columns( D1, indices3 ); // Scaling of the elements in the third selection of columns columns( D1, indices1 ) += D2; // Addition assignment columns( D1, indices2 ) -= S1; // Subtraction assignment columns( D1, indices3 ) %= cs; // Schur product assignment a = columns( D1, indices1 ) * b; // Dense matrix/sparse vector multiplication \endcode // \n \section views_column_selections_on_row_major_matrix Column Selections on a Row-Major Matrix // // Especially noteworthy is that column selections can be created for both row-major and // column-major matrices. Whereas the interface of a row-major matrix only allows to traverse a // row directly and the interface of a column-major matrix only allows to traverse a column, via // views it is possible to traverse a row of a column-major matrix or a column of a row-major // matrix. For instance: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 64UL, 32UL ); // ... 
Resizing and initialization // Creating a reference to the 1st and 3rd column of a column-major matrix A auto cs = columns( A, { 1UL, 3UL } ); // Traversing column 0 of the selection, which corresponds to the 1st column of matrix A for( auto it=cs.begin( 0UL ); it!=cs.end( 0UL ); ++it ) { // ... } \endcode // However, please note that creating a column selection on a matrix stored in a row-major fashion // can result in a considerable performance decrease in comparison to a column selection on a // matrix with column-major storage format. This is due to the non-contiguous storage of the // matrix elements. Therefore care has to be taken in the choice of the most suitable storage // order: \code // Setup of two row-major matrices blaze::DynamicMatrix<double,blaze::rowMajor> A( 128UL, 128UL ); blaze::DynamicMatrix<double,blaze::rowMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th, 30th, and 45th column of the multiplication between A and B ... blaze::DynamicMatrix<double,blaze::columnMajor> x = columns( A * B, { 15UL, 30UL, 45UL } ); // ... is essentially the same as the following computation, which multiplies // A with the 15th, 30th, and 45th column of the row-major matrix B. blaze::DynamicMatrix<double,blaze::columnMajor> x = A * column( B, { 15UL, 30UL, 45UL } ); \endcode // Although \b Blaze performs the resulting matrix/matrix multiplication as efficiently as possible // using a column-major storage order for matrix \c A would result in a more efficient evaluation. // // \n Previous: \ref views_columns &nbsp; &nbsp; Next: \ref views_bands */ //************************************************************************************************* //**Bands****************************************************************************************** /*!\page views_bands Bands // // \tableofcontents // // // Bands provide views on a specific band of a dense or sparse matrix (e.g. the diagonal, the // subdiagonal, ...). 
As such, bands act as a reference to a specific band. This reference // is valid and can be used in every way any other vector can be used as long as the matrix // containing the band is not resized or entirely destroyed. The band also acts as an alias to // the band elements: Changes made to the elements (e.g. modifying values, inserting or erasing // elements) are immediately visible in the matrix and changes made via the matrix are immediately // visible in the band. // // // \n \section views_bands_setup Setup of Bands // <hr> // // \image html band.png // \image latex band.eps "Band view" width=250pt // // A reference to a dense or sparse band can be created very conveniently via the \c band() // function. It can be included via the header files \code #include <blaze/Blaze.h> // or #include <blaze/Math.h> // or #include <blaze/math/Band.h> \endcode // and forward declared via the header file \code #include <blaze/Forward.h> \endcode // The band index must be in the range from \f$[min(0,1-M)..max(0,N-1)]\f$, where \c M is the // total number of rows and \c N is the total number of columns, and can be specified both at // compile time or at runtime: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a reference to the 1st lower band of matrix A (compile time index) auto band1 = band<-1L>( A ); // Creating a reference to the 2nd upper band of matrix A (runtime index) auto band2 = band( A, 2L ); \endcode // In addition, the \c diagonal() function provides a convenient shortcut for the setup of a view // on the diagonal of a dense or sparse matrix. It has the same effect as calling the \c band() // function with a compile time index of 0: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization // Creating a reference to the diagonal of matrix A via the band() and diagonal() functions auto diag1 = band<0L>( A ); auto diag2 = diagonal( A ); static_assert( blaze::IsSame< decltype(diag1), decltype(diag2) >::value, "Non-identical types detected" ); \endcode // Both the \c band() and the \c diagonal() function return an expression representing the band // view. The type of this expression depends on the given arguments, primarily the type of the // matrix and the compile time arguments. If the type is required, it can be determined via // \c decltype specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using BandType = decltype( blaze::band<1L>( std::declval<MatrixType>() ) ); using DiagonalType = decltype( blaze::diagonal( std::declval<MatrixType>() ) ); \endcode // This resulting view can be treated as any other vector, i.e. it can be assigned to, it can // be copied from, and it can be used in arithmetic operations. By default, bands are considered // column vectors, but this setting can be changed via the \c BLAZE_DEFAULT_TRANSPOSE_FLAG switch // (see \ref transpose_flag). The reference can also be used on both sides of an assignment: The // band can either be used as an alias to grant write access to a specific band of a matrix // primitive on the left-hand side of an assignment or to grant read-access to a specific band of // a matrix primitive or expression on the right-hand side of an assignment. The following example // demonstrates this in detail: \code blaze::DynamicVector<double,blaze::rowVector> x; blaze::CompressedVector<double,blaze::rowVector> y; blaze::DynamicMatrix<double,blaze::rowMajor> A, B; blaze::CompressedMatrix<double,blaze::rowMajor> C, D; // ... 
Resizing and initialization // Setting the 2nd upper band of matrix A to x auto band2 = band( A, 2L ); band2 = x; // Setting the 3rd upper band of matrix B to y band( B, 3L ) = y; // Setting x to the 2nd lower band of the result of the matrix multiplication x = band( A * B, -2L ); // Setting y to the 2nd upper band of the result of the sparse matrix multiplication y = band( C * D, 2L ); \endcode // \warning It is the programmer's responsibility to ensure the band does not outlive the viewed // matrix: \code // Creating a band on a temporary matrix; results in a dangling reference! auto band1 = band<1L>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_bands_element_access Element Access // <hr> // // The elements of a band can be directly accessed with the subscript operator: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a view on the 4th upper band of matrix A auto band4 = band( A, 4L ); // Setting the 1st element of the dense band, which corresponds // to the 1st element in the 4th upper band of matrix A band4[1] = 2.0; \endcode // The numbering of the band elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of elements of the referenced band. Alternatively, the elements of a band // can be traversed via iterators. Just as with vectors, in case of non-const band, \c begin() and // \c end() return an iterator, which allows to manipulate the elements, in case of constant bands // an iterator to immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 5th upper band of matrix A auto band5 = band( A, 5L ); // Traversing the elements via iterators to non-const elements for( auto it=band5.begin(); it!=band5.end(); ++it ) { *it = ...; // OK; Write access to the dense band value ... 
= *it; // OK: Read access to the dense band value. } // Traversing the elements via iterators to const elements for( auto it=band5.cbegin(); it!=band5.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense band value. } \endcode \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 5th band of matrix A auto band5 = band( A, 5L ); // Traversing the elements via iterators to non-const elements for( auto it=band5.begin(); it!=band5.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=band5.cbegin(); it!=band5.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_bands_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse band can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 10UL, 100UL ); // Non-initialized 10x100 matrix auto diag( band( A, 0L ) ); // Reference to the diagonal of A // The subscript operator provides access to all possible elements of the sparse band, // including the zero elements. 
In case the subscript operator is used to access an element // that is currently not stored in the sparse band, the element is inserted into the band. diag[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the band it is inserted into the band, if it is already contained in // the band its value is modified. diag.set( 45UL, -1.2 ); // An alternative for inserting elements into the band is the insert() function. However, // it inserts the element only in case the element is not already contained in the band. diag.insert( 50UL, 3.7 ); \endcode // \n \section views_bands_common_operations Common Operations // <hr> // // A band view can be used like any other column vector. This means that with only a few // exceptions all \ref vector_operations and \ref arithmetic_operations can be used. For instance, // the current number of band elements can be obtained via the \c size() function, the current // capacity via the \c capacity() function, and the number of non-zero elements via the // \c nonZeros() function. However, since bands are references to specific bands of a matrix, // several operations are not possible, such as resizing and swapping. The following example // shows this by means of a dense band view: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... 
Resizing and initialization // Creating a reference to the 2nd upper band of matrix A auto band2 = band( A, 2L ); band2.size(); // Returns the number of elements in the band band2.capacity(); // Returns the capacity of the band band2.nonZeros(); // Returns the number of non-zero elements contained in the band band2.resize( 84UL ); // Compilation error: Cannot resize a single band of a matrix auto band3 = band( A, 3L ); swap( band2, band3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_bands_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse bands can be used in all arithmetic operations that any other dense or // sparse vector can be used in. The following example gives an impression of the use of dense // bands within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse bands with // fitting element types: \code blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::columnVector> c( 2UL ); c[1] = 3.0; blaze::DynamicMatrix<double,blaze::rowMajor> A( 4UL, 2UL ); // Non-initialized 4x2 matrix auto band1( band( A, 1L ) ); // Reference to the 1st upper band of A auto diag ( band( A, 0L ) ); // Reference to the diagonal of A band1[0] = 0.0; // Manual initialization of the 1st upper band of A diag = 1.0; // Homogeneous initialization of the diagonal of A band( A, -1L ) = a; // Dense vector initialization of the 1st lower band of A band( A, -2L ) = c; // Sparse vector initialization of the 2nd lower band of A b = diag + a; // Dense vector/dense vector addition b = c + band( A, -1L ); // Sparse vector/dense vector addition b = diag * band( A, -2L ); // Component-wise vector multiplication band( A, -1L ) *= 2.0; // In-place scaling of the 1st upper band b = band( A, -1L ) * 2.0; // Scaling of the 1st upper band b = 2.0 * band( A, -1L ); // Scaling of the 
1st upper band band( A, -2L ) += a; // Addition assignment band( A, -2L ) -= c; // Subtraction assignment band( A, -2L ) *= band( A, 0L ); // Multiplication assignment double scalar = trans( c ) * band( A, -1L ); // Scalar/dot/inner product between two vectors A = band( A, -1L ) * trans( c ); // Outer product between two vectors \endcode // \n Previous: \ref views_column_selections &nbsp; &nbsp; Next: \ref arithmetic_operations */ //************************************************************************************************* //**Arithmetic Operations************************************************************************** /*!\page arithmetic_operations Arithmetic Operations // // \tableofcontents // // // \b Blaze provides the following arithmetic operations for vectors and matrices: // // <ul> // <li> \ref addition // <ul> // <li> \ref vector_vector_addition </li> // <li> \ref matrix_matrix_addition </li> // <li> \ref scalar_addition </li> // </ul> // </li> // <li> \ref subtraction // <ul> // <li> \ref vector_vector_subtraction </li> // <li> \ref matrix_matrix_subtraction </li> // <li> \ref scalar_subtraction </li> // </ul> // </li> // <li> \ref scalar_multiplication </li> // <li> \ref vector_vector_multiplication // <ul> // <li> \ref componentwise_multiplication </li> // <li> \ref inner_product </li> // <li> \ref outer_product </li> // <li> \ref cross_product </li> // <li> \ref vector_kronecker_product </li> // </ul> // </li> // <li> \ref vector_vector_division </li> // <li> \ref matrix_vector_multiplication </li> // <li> \ref matrix_matrix_multiplication // <ul> // <li> \ref schur_product </li> // <li> \ref matrix_product </li> // <li> \ref matrix_kronecker_product </li> // </ul> // </li> // </ul> // // \n Previous: \ref views_bands &nbsp; &nbsp; Next: \ref addition */ //************************************************************************************************* 
//**Addition*************************************************************************************** /*!\page addition Addition // // \n \section vector_vector_addition Vector/Vector Addition // <hr> // // The addition of vectors is as intuitive as the addition of scalar values. For the addition of // any two vectors the addition operator (i.e. \c operator+()) can be used. It even enables the // addition of dense and sparse vectors: \code blaze::DynamicVector<int> v1( 5UL ), v3; blaze::CompressedVector<float> v2( 5UL ); // ... Initializing the vectors v3 = v1 + v2; // Addition of a dense and a sparse column vector of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. Also note that it is only possible to add vectors with // the same transpose flag: \code using blaze::columnVector; using blaze::rowVector; blaze::DynamicVector<int,columnVector> v1( 5UL ); blaze::CompressedVector<float,rowVector> v2( 5UL ); v1 + v2; // Compilation error: Cannot add a column vector and a row vector v1 + trans( v2 ); // OK: Addition of two column vectors \endcode // Also note that the addition of two vectors with the same element type is favorable due to // possible vectorization of the operation: \code blaze::DynamicVector<double> v1( 100UL ), v2( 100UL ), v3; // ... 
Initialization of the vectors v3 = v1 + v2; // Vectorized addition of two double precision vectors \endcode // \n \section outer_sum Outer Sum // <hr> // // The addition between a column vector and a row vector results in the outer sum of the two // vectors: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 }; // Results in the matrix // // ( 1 5 0 6 ) // A = ( 4 8 3 9 ) // ( -2 2 -3 3 ) // blaze::StaticMatrix<int,3UL,4UL> M1 = v1 + v2; \endcode // The \c trans() function can be used to transpose a vector as necessary: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 }; blaze::StaticMatrix<int,3UL,4UL> M1 = trans( v1 ) + v2; \endcode // \n \section matrix_matrix_addition Matrix/Matrix Addition // <hr> // // For the addition of any two matrices the addition operator (i.e. \c operator+()) can be used. // It even enables the addition of dense and sparse matrices: \code using blaze::rowMajor; using blaze::columnMajor; blaze::CompressedMatrix<size_t,columnMajor> M1( 7UL, 3UL ); blaze::DynamicMatrix<float,rowMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 + M2; // Addition of a sparse column-major and a dense row-major matrix of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. It is possible to add row-major and column-major matrices. // Note however that in favor of performance the addition of two matrices with the same storage // order is favorable. The same argument holds for the element type: In case two matrices with // the same element type are added, the performance can be much higher due to vectorization of // the operation. \code blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... 
Initialization of the matrices M3 = M1 + M2; // Vectorized addition of two row-major, single precision dense matrices \endcode // \n \section scalar_addition Scalar Addition // <hr> // // For convenience it is also possible to add a scalar value to a dense vector or dense matrix, // which has the same effect as adding a uniform vector or matrix. In \b Blaze it is possible to // use all built-in/fundamental data types except bool as scalar values. Additionally, it is // possible to use \c std::complex values with the same built-in data types as element type. // Examples: \code blaze::StaticVector<int,3UL> v1{ 3, 2, 5, -4, 1, 6 }; blaze::DynamicVector<int> v2 = v1 + 2; // Results in { 5, 4, 7, -2, 3, 8 } blaze::CompressedVector<int> v3 = 3 + v1; // Results in { 6, 5, 8, -1, 4, 9 } \endcode \code blaze::StaticMatrix<int,2UL,3UL> M1{ { 3, 2, 5 }, { -4, 1, 6 } }; blaze::DynamicMatrix<int> M2 = M1 + 2; // Results in { { 5, 4, 7 }, { -2, 3, 8 } } blaze::CompressedMatrix<int> M3 = 3 + M1; // Results in { { 6, 5, 8 }, { -1, 4, 9 } } \endcode // \n Previous: \ref arithmetic_operations &nbsp; &nbsp; Next: \ref subtraction */ //************************************************************************************************* //**Subtraction************************************************************************************ /*!\page subtraction Subtraction // // \n \section vector_vector_subtraction Vector/Vector Subtraction // <hr> // // The subtraction of vectors works exactly as intuitive as the addition, but with the subtraction // operator (i.e. \c operator-()). It also enables the subtraction of dense and sparse vectors: \code blaze::DynamicVector<int> v1( 5UL ), v3; blaze::CompressedVector<float> v2( 5UL ); // ... Initializing the vectors v3 = v1 - v2; // Subtraction of a dense and a sparse column vector of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. 
Also note that in case of vectors it is only possible to // subtract vectors with the same transpose flag: \code blaze::DynamicVector<int,columnVector> v1( 5UL ); blaze::CompressedVector<float,rowVector> v2( 5UL ); v1 - v2; // Compilation error: Cannot subtract a row vector from a column vector v1 - trans( v2 ); // OK: Subtraction of two column vectors \endcode // Also note that the subtraction of two vectors with the same element type is favorable due to // possible vectorization of the operation: \code blaze::DynamicVector<double> v1( 100UL ), v2( 100UL ), v3; // ... Initialization of the vectors v3 = v1 - v2; // Vectorized subtraction of two double precision vectors \endcode // \n \section outer_difference Outer Difference // <hr> // // The subtraction between a column vector and a row vector results in the outer difference of // the two vectors: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 }; // Results in the matrix // // ( 3 -1 4 -2 ) // A = ( 6 2 7 1 ) // ( 0 -4 1 -5 ) // StaticMatrix<int,3UL,3UL> M1 = v1 - v2; \endcode // The \c trans() function can be used to transpose a vector as necessary: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 }; blaze::StaticMatrix<int,3UL,4UL> M1 = trans( v1 ) - v2; \endcode // \n \section matrix_matrix_subtraction Matrix/Matrix Subtraction // <hr> // // For the subtraction of any two matrices the subtraction operator (i.e. \c operator-()) can be // used. It even enables the subtraction of dense and sparse matrices: \code blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL ); blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 - M2; // Subtraction of a row-major and a column-major matrix of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. 
Violating this // precondition results in an exception. It is possible to subtract row-major and column-major // matrices. Note however that in favor of performance the subtraction of two matrices with the // same storage order is favorable. The same argument holds for the element type: In case two // matrices with the same element type are subtracted, the performance can be much higher due // to vectorization of the operation. \code blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... Initialization of the matrices M3 = M1 - M2; // Vectorized subtraction of two row-major, single precision dense matrices \endcode // \n \section scalar_subtraction Scalar Subtraction // <hr> // // For convenience it is also possible to subtract a scalar value from a dense vector or dense // matrix, which has the same effect as subtracting a uniform vector or matrix. In \b Blaze it is // possible to use all built-in/fundamental data types except bool as scalar values. Additionally, // it is possible to use \c std::complex values with the same built-in data types as element type. 
// Examples: \code blaze::StaticVector<int,6UL> v1{ 3, 2, 5, -4, 1, 6 }; blaze::DynamicVector<int> v2 = v1 - 2; // Results in { 1, 0, 3, -6, -1, 4 } blaze::CompressedVector<int> v3 = 3 - v1; // Results in { 0, 1, -2, 7, 2, -3 } \endcode \code blaze::StaticMatrix<int,2UL,3UL> M1{ { 3, 2, 5 }, { -4, 1, 6 } }; blaze::DynamicMatrix<int> M2 = M1 - 2; // Results in { { 1, 0, 3 }, { -6, -1, 4 } } blaze::CompressedMatrix<int> M3 = 3 - M1; // Results in { { 0, 1, -2 }, { 7, 2, -3 } } \endcode // \n Previous: \ref addition &nbsp; &nbsp; Next: \ref scalar_multiplication */ //************************************************************************************************* //**Scalar Multiplication************************************************************************** /*!\page scalar_multiplication Scalar Multiplication // // The scalar multiplication is the multiplication of a vector or a matrix with a scalar value. // Alternatively it is also possible to divide a vector or a matrix by a scalar value. In \b Blaze // it is possible to use all built-in/fundamental data types except bool as scalar values. // Additionally, it is possible to use \c std::complex values with the same built-in data types // as element type. 
\code blaze::StaticVector<int,3UL> v1{ 1, 2, 3 }; blaze::DynamicVector<double> v2 = v1 * 1.2; // Scalar multiplication blaze::CompressedVector<float> v3 = -0.3F * v1; // Scalar multiplication blaze::DynamicVector<double> v4 = v1 / 1.2; // Scalar division blaze::CompressedVector<float> v5 = 12.0F / v1; // Scalar division (only dense vectors) \endcode \code blaze::StaticMatrix<int,3UL,2UL> M1{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; blaze::DynamicMatrix<double> M2 = M1 * 1.2; // Scalar multiplication blaze::CompressedMatrix<float> M3 = -0.3F * M1; // Scalar multiplication blaze::DynamicMatrix<double> M4 = M1 / 1.2; // Scalar division blaze::CompressedMatrix<float> M5 = 12.0F / M1; // Scalar division (only dense matrices) \endcode // Vectors and matrices cannot be used as scalar values for scalar multiplications or divisions // (see the following example). However, each vector and matrix provides the \c scale() function, // which can be used to scale a vector or matrix element-wise with arbitrary scalar data types: \code blaze::CompressedMatrix< blaze::StaticMatrix<int,3UL,3UL> > M1; blaze::StaticMatrix<int,3UL,3UL> scalar; M1 * scalar; // No scalar multiplication, but matrix/matrix multiplication M1.scale( scalar ); // Scalar multiplication \endcode // \n Previous: \ref subtraction &nbsp; &nbsp; Next: \ref componentwise_multiplication */ //************************************************************************************************* //**Vector/Vector Multiplication******************************************************************* /*!\page vector_vector_multiplication Vector/Vector Multiplication // // \n \section componentwise_multiplication Componentwise Multiplication // <hr> // // Multiplying two vectors with the same transpose flag (i.e. 
either blaze::columnVector or // blaze::rowVector) via the multiplication operator results in a componentwise multiplication // of the two vectors: \code using blaze::DynamicVector; using blaze::CompressedVector; CompressedVector<int,columnVector> v1( 17UL ); DynamicVector<int,columnVector> v2( 17UL ); StaticVector<double,10UL,rowVector> v3; DynamicVector<double,rowVector> v4( 10UL ); // ... Initialization of the vectors CompressedVector<int,columnVector> v5( v1 * v2 ); // Componentwise multiplication of a sparse and // a dense column vector. The result is a sparse // column vector. DynamicVector<double,rowVector> v6( v3 * v4 ); // Componentwise multiplication of two dense row // vectors. The result is a dense row vector. \endcode // \n \section inner_product Inner Product / Scalar Product / Dot Product // <hr> // // The multiplication between a row vector and a column vector results in an inner product between // the two vectors: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 }; int result = v1 * v2; // Results in the value 15 \endcode // The \c trans() function can be used to transpose a vector as necessary: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; int result = v1 * trans( v2 ); // Also results in the value 15 \endcode // Alternatively, either the \c inner() function, the \c dot() function or the comma operator can // be used for any combination of vectors (row or column vectors) to perform an inner product: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; // All alternatives for the inner product between a column vector and a row vector int result1 = trans( v1 ) * trans( v2 ); int result2 = inner( v1, v2 ); int result3 = dot( v1, v2 ); int result4 = (v1,v2); \endcode // When using the comma operator, please note the brackets embracing the inner product 
expression. // Due to the low precedence of the comma operator (lower even than the assignment operator) these // brackets are strictly required for a correct evaluation of the inner product. // // // \n \section outer_product Outer Product // <hr> // // The multiplication between a column vector and a row vector results in the outer product of // the two vectors: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 }; // Results in the matrix // // ( -2 6 -4 8 ) // A = ( -5 15 -10 20 ) // ( 1 -3 2 -4 ) // StaticMatrix<int,3UL,4UL> M1 = v1 * v2; \endcode // The \c trans() function can be used to transpose a vector as necessary: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 }; blaze::StaticMatrix<int,3UL,4UL> M1 = trans( v1 ) * v2; \endcode // Alternatively, the \c outer() function can be used for any combination of vectors (row or column // vectors) to perform an outer product: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2, 4 }; blaze::StaticMatrix<int,3UL,4UL> M1 = outer( v1, v2 ); // Outer product between two row vectors \endcode // \n \section cross_product Cross Product // <hr> // // Two vectors with the same transpose flag can be multiplied via the cross product. The cross // product between two vectors \f$ a \f$ and \f$ b \f$ is defined as \f[ \left(\begin{array}{*{1}{c}} c_0 \\ c_1 \\ c_2 \\ \end{array}\right) = \left(\begin{array}{*{1}{c}} a_1 b_2 - a_2 b_1 \\ a_2 b_0 - a_0 b_2 \\ a_0 b_1 - a_1 b_0 \\ \end{array}\right). \f] // Due to the absence of a \f$ \times \f$ operator in the C++ language, the cross product is // realized via the \c cross() function. Alternatively, the modulo operator (i.e. 
\c operator%) // can be used in case infix notation is required: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 }; blaze::StaticVector<int,3UL,columnVector> v3( cross( v1, v2 ) ); blaze::StaticVector<int,3UL,columnVector> v4( v1 % v2 ); \endcode // Please note that the cross product is restricted to three dimensional (dense and sparse) // column vectors. // // // \n \section vector_kronecker_product Kronecker Product // <hr> // // The Kronecker product of two vectors with the same transpose flag can be computed via the // \a kron() function: \code using blaze::DynamicVector; using blaze::CompressedVector; DynamicVector<double> v1( 28UL ); CompressedVector<float> v2( 17UL ); // ... Initialization of the vectors CompressedVector<double> v3 = kron( v1, v2 ); \endcode // Both dense and sparse vectors can be used for a Kronecker product. It is possible to multiply // two vectors with different element type, as long as the element types themselves can be // multiplied. // // \n Previous: \ref scalar_multiplication &nbsp; &nbsp; Next: \ref vector_vector_division */ //************************************************************************************************* //**Vector/Vector Division************************************************************************* /*!\page vector_vector_division Vector/Vector Division // // \n \section componentwise_division Componentwise Division // <hr> // // Dividing a vector by a dense vector with the same transpose flag (i.e. either blaze::columnVector // or blaze::rowVector) via the division operator results in a componentwise division: \code using blaze::DynamicVector; using blaze::CompressedVector; CompressedVector<int,columnVector> v1( 17UL ); DynamicVector<int,columnVector> v2( 17UL ); StaticVector<double,10UL,rowVector> v3; DynamicVector<double,rowVector> v4( 10UL ); // ... 
Initialization of the vectors CompressedVector<int,columnVector> v5( v1 / v2 ); // Componentwise division of a sparse and a // dense column vector. The result is a sparse // column vector. DynamicVector<double,rowVector> v6( v3 / v4 ); // Componentwise division of two dense row // vectors. The result is a dense row vector. \endcode // Note that all values of the divisor must be non-zero and that no checks are performed to assert // this precondition! // // // \n \section outer_quotient Outer Quotient // <hr> // // The division between a column vector and a row vector results in the outer quotient of the // two vectors: \code blaze::StaticVector<double,3UL,columnVector> v1{ 2, 5, -1 }; blaze::DynamicVector<double,rowVector> v2{ -1, 5, -2, 4 }; // Results in the matrix // // ( -2 0.4 -1 0.5 ) // A = ( -5 1 -2.5 1.25 ) // ( 1 -0.2 0.5 -0.25 ) // blaze::StaticMatrix<double,3UL,4UL> M1 = v1 / v2; \endcode // The \c trans() function can be used to transpose a vector as necessary: \code blaze::StaticVector<double,3UL,rowVector> v1{ 2, 5, -1 }; blaze::DynamicVector<double,rowVector> v2{ -1, 5, -2, 4 }; blaze::StaticMatrix<double,3UL,4UL> M1 = trans( v1 ) / v2; \endcode // Note that all values of the divisor must be non-zero and that no checks are performed to assert // this precondition! // // \n Previous: \ref vector_vector_multiplication &nbsp; &nbsp; Next: \ref matrix_vector_multiplication */ //************************************************************************************************* //**Matrix/Vector Multiplication******************************************************************* /*!\page matrix_vector_multiplication Matrix/Vector Multiplication // // In \b Blaze matrix/vector multiplications can be as intuitively formulated as in mathematical // textbooks. 
Just as in textbooks there are two different multiplications between a matrix and // a vector: a matrix/column vector multiplication and a row vector/matrix multiplication: \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::DynamicMatrix; DynamicMatrix<int> M1( 39UL, 12UL ); StaticVector<int,12UL,columnVector> v1; // ... Initialization of the matrix and the vector DynamicVector<int,columnVector> v2 = M1 * v1; // Matrix/column vector multiplication DynamicVector<int,rowVector> v3 = trans( v1 ) * M1; // Row vector/matrix multiplication \endcode // Note that the storage order of the matrix poses no restrictions on the operation. Also note, // that the highest performance for a multiplication between a dense matrix and a dense vector can // be achieved if both the matrix and the vector have the same scalar element type. // // \n Previous: \ref vector_vector_division &nbsp; &nbsp; Next: \ref matrix_matrix_multiplication */ //************************************************************************************************* //**Matrix/Matrix Multiplication******************************************************************* /*!\page matrix_matrix_multiplication Matrix/Matrix Multiplication // // \n \section schur_product Componentwise Multiplication / Schur Product // <hr> // // Multiplying two matrices with the same dimensions (i.e. the same number of rows and columns) // via the modulo operator results in a componentwise multiplication (Schur product) of the two // matrices: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<double> M1( 28UL, 35UL ); CompressedMatrix<float> M2( 28UL, 35UL ); // ... Initialization of the matrices DynamicMatrix<double> M3 = M1 % M2; \endcode // Both dense and sparse matrices can be used for a Schur product. The storage order of the two // matrices poses no restrictions on the operation, all variations are possible. 
It is also // possible to multiply two matrices with different element type, as long as the element types // themselves can be multiplied. // // // \n \section matrix_product Matrix Product // <hr> // // The matrix/matrix product can be formulated exactly as in mathematical textbooks: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<double> M1( 45UL, 85UL ); CompressedMatrix<float> M2( 85UL, 37UL ); // ... Initialization of the matrices DynamicMatrix<double> M3 = M1 * M2; \endcode // The storage order of the two matrices poses no restrictions on the operation, all variations // are possible. It is also possible to multiply two matrices with different element type, as // long as the element types themselves can be multiplied and added. Note however that the // highest performance for a multiplication between two matrices can be expected for two // matrices with the same scalar element type. // // In case the resulting matrix is known to be symmetric, Hermitian, lower triangular, upper // triangular, or diagonal, the computation can be optimized by explicitly declaring the // multiplication as symmetric, Hermitian, lower triangular, upper triangular, or diagonal by // means of the \ref matrix_operations_declaration_operations : \code using blaze::DynamicMatrix; DynamicMatrix<double> M1, M2, M3; // ... Initialization of the square matrices M3 = declsym ( M1 * M2 ); // Declare the result of the matrix multiplication as symmetric M3 = declherm( M1 * M2 ); // Declare the result of the matrix multiplication as Hermitian M3 = decllow ( M1 * M2 ); // Declare the result of the matrix multiplication as lower triangular M3 = declupp ( M1 * M2 ); // Declare the result of the matrix multiplication as upper triangular M3 = decldiag( M1 * M2 ); // Declare the result of the matrix multiplication as diagonal \endcode // Using a declaration operation on the a multiplication expression can speed up the computation // by a factor of 2. 
Note however that the caller of the according declaration operation takes // full responsibility for the correctness of the declaration. Falsely declaring a multiplication // as symmetric, Hermitian, lower triangular, upper triangular, or diagonal leads to undefined // behavior! // // // \n \section matrix_kronecker_product Kronecker Product // <hr> // // The Kronecker product of two matrices can be computed via the \a kron() function: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<double> M1( 28UL, 35UL ); CompressedMatrix<float> M2( 17UL, 11UL ); // ... Initialization of the matrices CompressedMatrix<double> M3 = kron( M1, M2 ); \endcode // Both dense and sparse matrices can be used for a Kronecker product. The storage order of the // two matrices poses no restrictions on the operation, all variations are possible. It is also // possible to multiply two matrices with different element type, as long as the element types // themselves can be multiplied. // // \n Previous: \ref matrix_vector_multiplication &nbsp; &nbsp; Next: \ref bitwise_operations */ //************************************************************************************************* //**Bitwise Operations***************************************************************************** /*!\page bitwise_operations Bitwise Operations // // \tableofcontents // // // \b Blaze provides the following bitwise operations for vectors and matrices: // // <ul> // <li> \ref bitwise_shift // <ul> // <li> \ref vector_vector_shift </li> // <li> \ref matrix_matrix_shift </li> // <li> \ref scalar_shift </li> // </ul> // </li> // <li> \ref bitwise_and // <ul> // <li> \ref vector_vector_bitand </li> // <li> \ref matrix_matrix_bitand </li> // <li> \ref scalar_bitand </li> // </ul> // </li> // <li> \ref bitwise_or // <ul> // <li> \ref vector_vector_bitor </li> // <li> \ref matrix_matrix_bitor </li> // <li> \ref scalar_bitor </li> // </ul> // </li> // <li> \ref bitwise_xor // <ul> // <li> \ref 
vector_vector_bitxor </li> // <li> \ref matrix_matrix_bitxor </li> // <li> \ref scalar_bitxor </li> // </ul> // </li> // </ul> // // \n Previous: \ref matrix_matrix_multiplication &nbsp; &nbsp; Next: \ref bitwise_shift */ //************************************************************************************************* //**Bitwise Shift********************************************************************************** /*!\page bitwise_shift Bitwise Shift // // \n \section vector_vector_shift Vector/Vector Shift // <hr> // // Via the left-shift operator (i.e. operator<<()) and the right-shift operator (i.e. operator>>()) // it is possible to perform an elementwise shift of a dense vector: \code blaze::DynamicVector<unsigned int> v1( 5UL ), v3; blaze::DynamicVector<unsigned short> v2( 5UL ); // ... Initializing the vectors v3 = v1 << v2; // Elementwise left-shift of a dense column vector v3 = v1 >> v2; // Elementwise right-shift of a dense column vector \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. Also note that it is only possible to shift vectors with // the same transpose flag: \code using blaze::columnVector; using blaze::rowVector; blaze::DynamicVector<unsigned int,columnVector> v1( 5UL ); blaze::DynamicVector<unsigned int,rowVector> v2( 5UL ); v1 << v2; // Compilation error: Cannot shift a column vector by a row vector v1 << trans( v2 ); // OK: Shifting a column vector by another column vector \endcode // Furthermore, it is possible to use different element types in the two vector operands, but // shifting two vectors with the same element type is favorable due to possible vectorization // of the operation: \code blaze::DynamicVector<unsigned int> v1( 100UL ), v2( 100UL ), v3; // ... 
Initialization of the vectors v3 = v1 << v2; // Vectorized left-shift of an unsigned int vector \endcode // \n \section matrix_matrix_shift Matrix/Matrix Shift // <hr> // // The left-shift operator (i.e. operator<<()) and the right-shift operator (i.e. operator>>()) // can also be used to perform an elementwise shift of a dense matrix: \code using blaze::rowMajor; using blaze::columnMajor; blaze::DynamicMatrix<unsigned int,columnMajor> M1( 7UL, 3UL ); blaze::DynamicMatrix<unsigned short,rowMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 << M2; // Elementwise left-shift of a dense column-major matrix M3 = M1 >> M2; // Elementwise right-shift of a dense column-major matrix \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. It is possible to use any combination of row-major and // column-major matrices. Note however that in favor of performance using two matrices with the // same storage order is favorable. The same argument holds for the element type: While it is // possible to use matrices with different element type, using two matrices with the same element // type potentially leads to better performance due to vectorization of the operation. \code blaze::DynamicMatrix<unsigned int> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... Initialization of the matrices M3 = M1 << M2; // Vectorized left-shift of an unsigned int matrix \endcode // \n \section scalar_shift Scalar Shift // <hr> // // It is also possible to uniformly shift all elements of a dense vector or dense matrix by means // of a scalar, which has the same effect as shifting by means of a uniform vector or matrix (see // \ref vector_types_uniform_vector and \ref matrix_types_uniform_matrix). In \b Blaze it is // possible to use all built-in/fundamental data types except bool as scalar values. 
Examples: \code blaze::DynamicVector<unsigned int> v1{ 3, 2, 5, 4, 1, 6 }; // Uniform left-shift by one bit of all elements of v1; Results in // // ( 6, 4, 10, 8, 2, 12 ) // blaze::DynamicVector<int> v2( v1 << 1U ); \endcode \code blaze::DynamicMatrix<unsigned int> M1{ { 3, 2, 5 }, { 4, 1, 6 } }; // Uniform left-shift by one bit of all elements of M1; Results in // // ( 6, 4, 10 ) // ( 8, 2, 12 ) // blaze::DynamicMatrix<unsigned int> M2( M1 << 1U ); \endcode // \n Previous: \ref bitwise_operations &nbsp; &nbsp; Next: \ref bitwise_and */ //************************************************************************************************* //**Bitwise AND************************************************************************************ /*!\page bitwise_and Bitwise AND // // \n \section vector_vector_bitand Vector/Vector Bitwise AND // <hr> // // Via the bitwise AND operator (i.e. operator&()) it is possible to perform an elementwise // bitwise AND with dense vectors: \code blaze::DynamicVector<unsigned int> v1( 5UL ), v3; blaze::DynamicVector<unsigned short> v2( 5UL ); // ... Initializing the vectors v3 = v1 & v2; // Elementwise bitwise AND of two dense column vectors of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. 
Also note that it is only possible to use vectors with // the same transpose flag: \code using blaze::columnVector; using blaze::rowVector; blaze::DynamicVector<unsigned int,columnVector> v1( 5UL ); blaze::DynamicVector<unsigned int,rowVector> v2( 5UL ); v1 & v2; // Compilation error: Cannot AND a column vector and a row vector v1 & trans( v2 ); // OK: Bitwise AND of two column vectors \endcode // Furthermore, it is possible to use different element types in the two vector operands, but a // bitwise AND of two vectors with the same element type is favorable due to possible vectorization // of the operation: \code blaze::DynamicVector<unsigned int> v1( 100UL ), v2( 100UL ), v3; // ... Initialization of the vectors v3 = v1 & v2; // Vectorized bitwise AND of an unsigned int vector \endcode // \n \section matrix_matrix_bitand Matrix/Matrix Bitwise AND // <hr> // // The bitwise AND operator (i.e. operator&()) can also be used to perform an elementwise bitwise // AND with dense matrices: \code using blaze::rowMajor; using blaze::columnMajor; blaze::DynamicMatrix<unsigned int,columnMajor> M1( 7UL, 3UL ); blaze::DynamicMatrix<unsigned short,rowMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 & M2; // Elementwise bitwise AND of two dense matrices of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. It is possible to use any combination of row-major and // column-major matrices. Note however that in favor of performance using two matrices with the // same storage order is favorable. The same argument holds for the element type: While it is // possible to use matrices with different element type, using two matrices with the same element // type potentially leads to better performance due to vectorization of the operation. \code blaze::DynamicMatrix<unsigned int> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... 
Initialization of the matrices M3 = M1 & M2; // Vectorized bitwise AND of two row-major, unsigned int dense matrices \endcode // \n \section scalar_bitand Scalar Bitwise AND // <hr> // // It is also possible to perform a bitwise AND between a dense vector or dense matrix and a // scalar value, which has the same effect as performing a bitwise AND by means of a uniform // vector or matrix (see \ref vector_types_uniform_vector and \ref matrix_types_uniform_matrix). // In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar // values. Examples: \code blaze::DynamicVector<unsigned int> v1{ 3U, 2U, 5U, 4U, 1U, 6U }; // Perform a bitwise AND with all elements of v1; Results in // // ( 3, 2, 1, 0, 1, 2 ) // blaze::DynamicVector<int> v2( v1 & 3U ); \endcode \code blaze::DynamicMatrix<unsigned int> M1{ { 3U, 2U, 5U }, { 4U, 1U, 6U } }; // Perform a bitwise AND with all elements of M1; Results in // // ( 3, 2, 1 ) // ( 0, 1, 2 ) // blaze::DynamicMatrix<unsigned int> M2( M1 & 3U ); \endcode // \n Previous: \ref bitwise_shift &nbsp; &nbsp; Next: \ref bitwise_or */ //************************************************************************************************* //**Bitwise OR************************************************************************************* /*!\page bitwise_or Bitwise OR // // \n \section vector_vector_bitor Vector/Vector Bitwise OR // <hr> // // Via the bitwise OR operator (i.e. operator|()) it is possible to perform an elementwise // bitwise OR with dense vectors: \code blaze::DynamicVector<unsigned int> v1( 5UL ), v3; blaze::DynamicVector<unsigned short> v2( 5UL ); // ... Initializing the vectors v3 = v1 | v2; // Elementwise bitwise OR of two dense column vectors of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. 
Also note that it is only possible to use vectors with // the same transpose flag: \code using blaze::columnVector; using blaze::rowVector; blaze::DynamicVector<unsigned int,columnVector> v1( 5UL ); blaze::DynamicVector<unsigned int,rowVector> v2( 5UL ); v1 | v2; // Compilation error: Cannot OR a column vector and a row vector v1 | trans( v2 ); // OK: Bitwise OR of two column vectors \endcode // Furthermore, it is possible to use different element types in the two vector operands, but a // bitwise OR of two vectors with the same element type is favorable due to possible vectorization // of the operation: \code blaze::DynamicVector<unsigned int> v1( 100UL ), v2( 100UL ), v3; // ... Initialization of the vectors v3 = v1 | v2; // Vectorized bitwise OR of an unsigned int vector \endcode // \n \section matrix_matrix_bitor Matrix/Matrix Bitwise OR // <hr> // // The bitwise OR operator (i.e. operator|()) can also be used to perform an elementwise bitwise // OR with dense matrices: \code using blaze::rowMajor; using blaze::columnMajor; blaze::DynamicMatrix<unsigned int,columnMajor> M1( 7UL, 3UL ); blaze::DynamicMatrix<unsigned short,rowMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 | M2; // Elementwise bitwise OR of two dense matrices of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. It is possible to use any combination of row-major and // column-major matrices. Note however that in favor of performance using two matrices with the // same storage order is favorable. The same argument holds for the element type: While it is // possible to use matrices with different element type, using two matrices with the same element // type potentially leads to better performance due to vectorization of the operation. \code blaze::DynamicMatrix<unsigned int> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... 
Initialization of the matrices M3 = M1 | M2; // Vectorized bitwise OR of two row-major, unsigned int dense matrices \endcode // \n \section scalar_bitor Scalar Bitwise OR // <hr> // // It is also possible to perform a bitwise OR between a dense vector or dense matrix and a // scalar value, which has the same effect as performing a bitwise OR by means of a uniform // vector or matrix (see \ref vector_types_uniform_vector and \ref matrix_types_uniform_matrix). // In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar // values. Examples: \code blaze::DynamicVector<unsigned int> v1{ 3U, 2U, 5U, 4U, 1U, 6U }; // Perform a bitwise OR with all elements of v1; Results in // // ( 3, 3, 7, 7, 3, 3 ) // blaze::DynamicVector<int> v2( v1 | 3U ); \endcode \code blaze::DynamicMatrix<unsigned int> M1{ { 3U, 2U, 5U }, { 4U, 1U, 6U } }; // Perform a bitwise OR with all elements of M1; Results in // // ( 3, 3, 7 ) // ( 7, 3, 3 ) // blaze::DynamicMatrix<unsigned int> M2( M1 | 3U ); \endcode // \n Previous: \ref bitwise_and &nbsp; &nbsp; Next: \ref bitwise_xor */ //************************************************************************************************* //**Bitwise XOR************************************************************************************ /*!\page bitwise_xor Bitwise XOR // // \n \section vector_vector_bitxor Vector/Vector Bitwise XOR // <hr> // // Via the bitwise XOR operator (i.e. operator^()) it is possible to perform an elementwise // bitwise XOR with dense vectors: \code blaze::DynamicVector<unsigned int> v1( 5UL ), v3; blaze::DynamicVector<unsigned short> v2( 5UL ); // ... Initializing the vectors v3 = v1 ^ v2; // Elementwise bitwise XOR of two dense column vectors of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. 
Also note that it is only possible to use vectors with // the same transpose flag: \code using blaze::columnVector; using blaze::rowVector; blaze::DynamicVector<unsigned int,columnVector> v1( 5UL ); blaze::DynamicVector<unsigned int,rowVector> v2( 5UL ); v1 ^ v2; // Compilation error: Cannot XOR a column vector and a row vector v1 ^ trans( v2 ); // OK: Bitwise XOR of two column vectors \endcode // Furthermore, it is possible to use different element types in the two vector operands, but a // bitwise XOR of two vectors with the same element type is favorable due to possible vectorization // of the operation: \code blaze::DynamicVector<unsigned int> v1( 100UL ), v2( 100UL ), v3; // ... Initialization of the vectors v3 = v1 ^ v2; // Vectorized bitwise XOR of an unsigned int vector \endcode // \n \section matrix_matrix_bitxor Matrix/Matrix Bitwise XOR // <hr> // // The bitwise XOR operator (i.e. operator^()) can also be used to perform an elementwise bitwise // XOR with dense matrices: \code using blaze::rowMajor; using blaze::columnMajor; blaze::DynamicMatrix<unsigned int,columnMajor> M1( 7UL, 3UL ); blaze::DynamicMatrix<unsigned short,rowMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 ^ M2; // Elementwise bitwise XOR of two dense matrices of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. It is possible to use any combination of row-major and // column-major matrices. Note however that in favor of performance using two matrices with the // same storage order is favorable. The same argument holds for the element type: While it is // possible to use matrices with different element type, using two matrices with the same element // type potentially leads to better performance due to vectorization of the operation. \code blaze::DynamicMatrix<unsigned int> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... 
Initialization of the matrices M3 = M1 ^ M2; // Vectorized bitwise XOR of two row-major, unsigned int dense matrices \endcode // \n \section scalar_bitxor Scalar Bitwise XOR // <hr> // // It is also possible to perform a bitwise XOR between a dense vector or dense matrix and a // scalar value, which has the same effect as performing a bitwise XOR by means of a uniform // vector or matrix (see \ref vector_types_uniform_vector and \ref matrix_types_uniform_matrix). // In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar // values. Examples: \code blaze::DynamicVector<unsigned int> v1{ 3U, 2U, 5U, 4U, 1U, 6U }; // Perform a bitwise XOR with all elements of v1; Results in // // ( 0, 1, 6, 7, 2, 5 ) // blaze::DynamicVector<int> v2( v1 ^ 3U ); \endcode \code blaze::DynamicMatrix<unsigned int> M1{ { 3U, 2U, 5U }, { 4U, 1U, 6U } }; // Perform a bitwise XOR with all elements of M1; Results in // // ( 0, 1, 6 ) // ( 7, 2, 5 ) // blaze::DynamicMatrix<unsigned int> M2( M1 ^ 3U ); \endcode // \n Previous: \ref bitwise_or &nbsp; &nbsp; Next: \ref logical_operations */ //************************************************************************************************* //**Logical Operations***************************************************************************** /*!\page logical_operations Logical Operations // // \tableofcontents // // // \b Blaze provides the following logical operations for vectors and matrices: // // <ul> // <li> \ref logical_not // <ul> // <li> \ref vector_vector_not </li> // <li> \ref matrix_matrix_not </li> // </ul> // </li> // <li> \ref logical_and // <ul> // <li> \ref vector_vector_and </li> // <li> \ref matrix_matrix_and </li> // </ul> // </li> // <li> \ref logical_or // <ul> // <li> \ref vector_vector_or </li> // <li> \ref matrix_matrix_or </li> // </ul> // </li> // </ul> // // \n Previous: \ref bitwise_xor &nbsp; &nbsp; Next: \ref logical_not */ 
//************************************************************************************************* //**Logical NOT************************************************************************************ /*!\page logical_not Logical NOT // // \n \section vector_vector_not Vector/Vector Logical NOT // <hr> // // Via the logical NOT operator (i.e. operator!()) it is possible to compute an elementwise // logical NOT of a dense vector: \code blaze::DynamicVector<bool> v1( 5UL ), v2; // ... Initializing the vectors v2 = !v1; // Elementwise logical NOT of a dense column vector \endcode // \n \section matrix_matrix_not Matrix/Matrix Logical NOT // <hr> // // The logical NOT operator (i.e. operator!()) can also be used to compute an elementwise logical // NOT with dense matrices: \code using blaze::rowMajor; using blaze::columnMajor; blaze::DynamicMatrix<bool,rowMajor> M1( 7UL, 3UL ), M2; // ... Initializing the matrices M2 = !M1; // Elementwise logical NOT of a dense row-major matrix \endcode // \n Previous: \ref logical_operations &nbsp; &nbsp; Next: \ref logical_and */ //************************************************************************************************* //**Logical AND************************************************************************************ /*!\page logical_and Logical AND // // \n \section vector_vector_and Vector/Vector Logical AND // <hr> // // Via the logical AND operator (i.e. operator&&()) it is possible to compute an elementwise // logical AND with dense vectors: \code blaze::DynamicVector<bool> v1( 5UL ), v3; blaze::DynamicVector<bool> v2( 5UL ); // ... Initializing the vectors v3 = v1 && v2; // Elementwise logical AND of two dense column vectors \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. 
Also note that it is only possible to use vectors with // the same transpose flag: \code using blaze::columnVector; using blaze::rowVector; blaze::DynamicVector<bool,columnVector> v1( 5UL ); blaze::DynamicVector<bool,rowVector> v2( 5UL ); v1 && v2; // Compilation error: Cannot AND a column vector and a row vector v1 && trans( v2 ); // OK: Logical AND of two column vectors \endcode // \n \section matrix_matrix_and Matrix/Matrix Logical AND // <hr> // // The logical AND operator (i.e. operator&&()) can also be used to compute an elementwise logical // AND with dense matrices: \code using blaze::rowMajor; using blaze::columnMajor; blaze::DynamicMatrix<bool,columnMajor> M1( 7UL, 3UL ); blaze::DynamicMatrix<bool,rowMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 && M2; // Elementwise logical AND of two dense matrices \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. It is possible to use any combination of row-major and // column-major matrices. Note however that in favor of performance using two matrices with the // same storage order is favorable. // // \n Previous: \ref logical_not &nbsp; &nbsp; Next: \ref logical_or */ //************************************************************************************************* //**Logical OR************************************************************************************* /*!\page logical_or Logical OR // // \n \section vector_vector_or Vector/Vector Logical OR // <hr> // // Via the logical OR operator (i.e. operator||()) it is possible to perform an elementwise // logical OR with dense vectors: \code blaze::DynamicVector<bool> v1( 5UL ), v3; blaze::DynamicVector<bool> v2( 5UL ); // ... Initializing the vectors v3 = v1 || v2; // Elementwise logical OR of two dense column vectors \endcode // Note that it is necessary that both operands have exactly the same dimensions. 
Violating this // precondition results in an exception. Also note that it is only possible to use vectors with // the same transpose flag: \code using blaze::columnVector; using blaze::rowVector; blaze::DynamicVector<unsigned int,columnVector> v1( 5UL ); blaze::DynamicVector<unsigned int,rowVector> v2( 5UL ); v1 || v2; // Compilation error: Cannot OR a column vector and a row vector v1 || trans( v2 ); // OK: Logical OR of two column vectors \endcode // \n \section matrix_matrix_or Matrix/Matrix Logical OR // <hr> // // The logical OR operator (i.e. operator||()) can also be used to perform an elementwise logical // OR with dense matrices: \code using blaze::rowMajor; using blaze::columnMajor; blaze::DynamicMatrix<bool,columnMajor> M1( 7UL, 3UL ); blaze::DynamicMatrix<bool,rowMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 || M2; // Elementwise logical OR of two dense matrices \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. It is possible to use any combination of row-major and // column-major matrices. Note however that in favor of performance using two matrices with the // same storage order is favorable. // // \n Previous: \ref logical_and &nbsp; &nbsp; Next: \ref shared_memory_parallelization */ //************************************************************************************************* //**Shared Memory Parallelization****************************************************************** /*!\page shared_memory_parallelization Shared Memory Parallelization // // For all possible operations \b Blaze tries to achieve maximum performance on a single CPU // core. However, today's CPUs are not single core anymore, but provide several (homogeneous // or heterogeneous) compute cores. In order to fully exploit the performance potential of a // multicore CPU, computations have to be parallelized across all available cores of a CPU. 
// For this purpose, \b Blaze provides four different shared memory parallelization techniques: // // - \ref hpx_parallelization // - \ref cpp_threads_parallelization // - \ref boost_threads_parallelization // - \ref openmp_parallelization // // When any of the shared memory parallelization techniques is activated, all arithmetic // operations on dense vectors and matrices (including additions, subtractions, multiplications, // divisions, and all componentwise arithmetic operations) and most operations on sparse vectors // and matrices are automatically run in parallel. However, in addition, \b Blaze provides means // to enforce the serial execution of specific operations: // // - \ref serial_execution // // \n Previous: \ref logical_or &nbsp; &nbsp; Next: \ref hpx_parallelization */ //************************************************************************************************* //**HPX Parallelization**************************************************************************** /*!\page hpx_parallelization HPX Parallelization // // \tableofcontents // // // The first shared memory parallelization provided with \b Blaze is based on // <a href="http://stellar.cct.lsu.edu/projects/hpx/">HPX</a>. // // // \n \section hpx_setup HPX Setup // <hr> // // In order to enable the HPX-based parallelization, the following steps have to be taken: First, // the \c BLAZE_USE_HPX_THREADS command line argument has to be explicitly specified during // compilation: \code ... -DBLAZE_USE_HPX_THREADS ... \endcode // Second, the HPX library and depending libraries such as Boost, hwloc, etc. have to be linked. // And third, the HPX threads have to be initialized by a call to the \c hpx::init() function (see // the <a href="http://stellar.cct.lsu.edu/files/hpx_0.9.0/docs/hpx/tutorial.html">HPX tutorial</a> // for further details). These three actions will cause the \b Blaze library to automatically try // to run all operations in parallel with the specified number of HPX threads. 
// // Note that the HPX-based parallelization has priority over the OpenMP-based, C++11 thread-based, // and Boost thread-based parallelizations, i.e. is preferred in case multiple parallelizations // are enabled in combination with the HPX thread parallelization. // // The number of threads used by the HPX backend has to be specified via the command line: \code ... --hpx:threads 4 ... \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. // // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of HPX threads, the function will return the actual number of threads used by // the HPX subsystem. // // // \n \section hpx_configuration HPX Configuration // <hr> // // As in case of the other shared memory parallelizations \b Blaze is not unconditionally running // an operation in parallel (see for instance \ref openmp_parallelization). Only in case a given // operation is large enough and exceeds a certain threshold the operation is executed in parallel. // All thresholds related to the HPX-based parallelization are contained within the configuration // file <tt><blaze/config/Thresholds.h></tt>. // // Please note that these thresholds are highly sensitive to the used system architecture and // the shared memory parallelization technique. Therefore the default values cannot guarantee // maximum performance for all possible situations and configurations. They merely provide a // reasonable standard for the current CPU generation. 
Also note that the provided defaults // have been determined using the OpenMP parallelization and require individual adaption for // the HPX-based parallelization. // // \n Previous: \ref shared_memory_parallelization &nbsp; &nbsp; Next: \ref cpp_threads_parallelization */ //************************************************************************************************* //**C++11 Thread Parallelization******************************************************************* /*!\page cpp_threads_parallelization C++11 Thread Parallelization // // \tableofcontents // // // In addition to the HPX-based shared memory parallelization, starting with \b Blaze 2.1, // \b Blaze also provides a shared memory parallelization based on C++11 threads. // // // \n \section cpp_threads_setup C++11 Thread Setup // <hr> // // In order to enable the C++11 thread-based parallelization, first the according C++11-specific // compiler flags have to be used and second the \c BLAZE_USE_CPP_THREADS command line argument // has to be explicitly specified. For instance, in case of the GNU C++ and Clang compilers the // compiler flags have to be extended by \code ... -std=c++11 -DBLAZE_USE_CPP_THREADS ... \endcode // This simple action will cause the \b Blaze library to automatically try to run all operations // in parallel with the specified number of C++11 threads. Note that in case both HPX and C++11 // threads are enabled on the command line, the HPX-based parallelization has priority and is // preferred. // // The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS \code export BLAZE_NUM_THREADS=4 // Unix systems set BLAZE_NUM_THREADS=4 // Windows systems \endcode // or alternatively via the \c setNumThreads() function provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. 
Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. // // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of C++11 threads, the function will return the previously specified number of // threads. // // // \n \section cpp_threads_configuration C++11 Thread Configuration // <hr> // // As in case of the OpenMP-based parallelization \b Blaze is not unconditionally running an // operation in parallel. In case \b Blaze deems the parallel execution as counterproductive for // the overall performance, the operation is executed serially. One of the main reasons for not // executing an operation in parallel is the size of the operands. For instance, a vector addition // is only executed in parallel if the size of both vector operands exceeds a certain threshold. // Otherwise, the performance could seriously decrease due to the overhead caused by the thread // setup. However, in order to be able to adjust the \b Blaze library to a specific system, it // is possible to configure these thresholds manually. All thresholds are contained within the // configuration file <tt><blaze/config/Thresholds.h></tt>. // // Please note that these thresholds are highly sensitive to the used system architecture and // the shared memory parallelization technique. Therefore the default values cannot guarantee // maximum performance for all possible situations and configurations. They merely provide a // reasonable standard for the current CPU generation. Also note that the provided defaults // have been determined using the OpenMP parallelization and require individual adaption for // the C++11 thread parallelization. 
// // // \n \section cpp_threads_known_issues Known Issues // <hr> // // There is a known issue in Visual Studio 2012 and 2013 that may cause C++11 threads to hang // if their destructor is executed after the \c main() function: // // http://connect.microsoft.com/VisualStudio/feedback/details/747145 // // Unfortunately, the C++11 parallelization of the \b Blaze library is affected from this bug. // In order to circumvent this problem, \b Blaze provides the \c shutDownThreads() function, // which can be used to manually destroy all threads at the end of the \c main() function: \code int main() { // ... Using the C++11 thread parallelization of Blaze shutDownThreads(); } \endcode // Please note that this function may only be used at the end of the \c main() function. After // this function no further computation may be executed! Also note that this function has an // effect for Visual Studio compilers only and doesn't need to be used with any other compiler. // // \n Previous: \ref hpx_parallelization &nbsp; &nbsp; Next: \ref boost_threads_parallelization */ //************************************************************************************************* //**Boost Thread Parallelization******************************************************************* /*!\page boost_threads_parallelization Boost Thread Parallelization // // \tableofcontents // // // The third available shared memory parallelization provided with \b Blaze is based // on <a href="https://www.boost.org/doc/libs/1_68_0/doc/html/thread.html">Boost threads</a>. // // // \n \section boost_threads_setup Boost Thread Setup // <hr> // // In order to enable the Boost thread-based parallelization, two steps have to be taken: First, // the \c BLAZE_USE_BOOST_THREADS command line argument has to be explicitly specified during // compilation: \code ... -DBLAZE_USE_BOOST_THREADS ... \endcode // Second, the according Boost libraries have to be linked. 
These two simple actions will cause // the \b Blaze library to automatically try to run all operations in parallel with the specified // number of Boost threads. Note that the HPX-based and C++11 thread-based parallelizations have // priority, i.e. are preferred in case either is enabled in combination with the Boost thread // parallelization. // // The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS \code export BLAZE_NUM_THREADS=4 // Unix systems set BLAZE_NUM_THREADS=4 // Windows systems \endcode // or alternatively via the \c setNumThreads() function provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. // // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of Boost threads, the function will return the previously specified number of // threads. // // // \n \section boost_threads_configuration Boost Thread Configuration // <hr> // // As in case of the other shared memory parallelizations \b Blaze is not unconditionally running // an operation in parallel (see \ref openmp_parallelization or \ref cpp_threads_parallelization). // All thresholds related to the Boost thread parallelization are also contained within the // configuration file <tt><blaze/config/Thresholds.h></tt>. // // Please note that these thresholds are highly sensitive to the used system architecture and // the shared memory parallelization technique. Therefore the default values cannot guarantee // maximum performance for all possible situations and configurations. 
They merely provide a // reasonable standard for the current CPU generation. Also note that the provided defaults // have been determined using the OpenMP parallelization and require individual adaption for // the Boost thread parallelization. // // \n Previous: \ref cpp_threads_parallelization &nbsp; &nbsp; Next: \ref openmp_parallelization */ //************************************************************************************************* //**OpenMP Parallelization************************************************************************* /*!\page openmp_parallelization OpenMP Parallelization // // \tableofcontents // // // The fourth and final shared memory parallelization provided with \b Blaze is based on // <a href="https://www.openmp.org">OpenMP</a>. // // // \n \section openmp_setup OpenMP Setup // <hr> // // To enable the OpenMP-based parallelization, all that needs to be done is to explicitly specify // the use of OpenMP on the command line: \code -fopenmp // GNU/Clang C++ compiler -openmp // Intel C++ compiler /openmp // Visual Studio \endcode // This simple action will cause the \b Blaze library to automatically try to run all operations // in parallel with the specified number of threads. Note however that the HPX-based, the C++11 // thread-based, and the Boost thread-based parallelizations have priority, i.e. are preferred in // case either is enabled in combination with the OpenMP thread parallelization. 
// // As common for OpenMP, the number of threads can be specified either via an environment variable \code export OMP_NUM_THREADS=4 // Unix systems set OMP_NUM_THREADS=4 // Windows systems \endcode // or via an explicit call to the \c omp_set_num_threads() function: \code omp_set_num_threads( 4 ); \endcode // Alternatively, the number of threads can also be specified via the \c setNumThreads() function // provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. // // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of OpenMP, the function returns the maximum number of threads OpenMP will use // within a parallel region and is therefore equivalent to the \c omp_get_max_threads() function. // // // \n \section openmp_configuration OpenMP Configuration // <hr> // // Note that \b Blaze is not unconditionally running an operation in parallel. In case \b Blaze // deems the parallel execution as counterproductive for the overall performance, the operation // is executed serially. One of the main reasons for not executing an operation in parallel is // the size of the operands. For instance, a vector addition is only executed in parallel if the // size of both vector operands exceeds a certain threshold. Otherwise, the performance could // seriously decrease due to the overhead caused by the thread setup. However, in order to be // able to adjust the \b Blaze library to a specific system, it is possible to configure these // thresholds manually. 
All shared memory thresholds are contained within the configuration file // <tt><blaze/config/Thresholds.h></tt>. // // Please note that these thresholds are highly sensitive to the used system architecture and // the shared memory parallelization technique (see also \ref cpp_threads_parallelization and // \ref boost_threads_parallelization). Therefore the default values cannot guarantee maximum // performance for all possible situations and configurations. They merely provide a reasonable // standard for the current CPU generation. // // // \n \section openmp_first_touch First Touch Policy // <hr> // // So far the \b Blaze library does not (yet) automatically initialize dynamic memory according // to the first touch principle. Consider for instance the following vector triad example: \code using blaze::columnVector; const size_t N( 1000000UL ); blaze::DynamicVector<double,columnVector> a( N ), b( N ), c( N ), d( N ); // Initialization of the vectors b, c, and d for( size_t i=0UL; i<N; ++i ) { b[i] = rand<double>(); c[i] = rand<double>(); d[i] = rand<double>(); } // Performing a vector triad a = b + c * d; \endcode // If this code, which is prototypical for many OpenMP applications that have not been optimized // for ccNUMA architectures, is run across several locality domains (LD), it will not scale // beyond the maximum performance achievable on a single LD if the working set does not fit into // the cache. This is because the initialization loop is executed by a single thread, writing to // \c b, \c c, and \c d for the first time. Hence, all memory pages belonging to those arrays will // be mapped into a single LD. // // As mentioned above, this problem can be solved by performing vector initialization in parallel: \code // ... // Initialization of the vectors b, c, and d #pragma omp parallel for for( size_t i=0UL; i<N; ++i ) { b[i] = rand<double>(); c[i] = rand<double>(); d[i] = rand<double>(); } // ... 
\endcode // This simple modification makes a huge difference on ccNUMA in memory-bound situations (as for // instance in all BLAS level 1 operations and partially BLAS level 2 operations). Therefore, in // order to achieve the maximum possible performance, it is imperative to initialize the memory // according to the later use of the data structures. // // // \n \section openmp_limitations Limitations of the OpenMP Parallelization // <hr> // // There are a few important limitations to the current \b Blaze OpenMP parallelization. The first // one involves the explicit use of an OpenMP parallel region (see \ref openmp_parallel), the // other one the OpenMP \c sections directive (see \ref openmp_sections). // // // \n \subsection openmp_parallel The Parallel Directive // // In OpenMP threads are explicitly spawned via an OpenMP parallel directive: \code // Serial region, executed by a single thread #pragma omp parallel { // Parallel region, executed by the specified number of threads } // Serial region, executed by a single thread \endcode // Conceptually, the specified number of threads (see \ref openmp_setup) is created every time a // parallel directive is encountered. Therefore, from a performance point of view, it seems to be // beneficial to use a single OpenMP parallel directive for several operations: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; #pragma omp parallel { y1 = A * x; y2 = B * x; } \endcode // Unfortunately, this optimization approach is not allowed within the \b Blaze library. More // explicitly, it is not allowed to put an operation into a parallel region. The reason is that // the entire code contained within a parallel region is executed by all threads. 
Although this // appears to just comprise the contained computations, a computation (or more specifically the // assignment of an expression to a vector or matrix) can contain additional logic that must not // be handled by multiple threads (as for instance memory allocations, setup of temporaries, etc.). // Therefore it is not possible to manually start a parallel region for several operations, but // \b Blaze will spawn threads automatically, depending on the specifics of the operation at hand // and the given operands. // // \n \subsection openmp_sections The Sections Directive // // OpenMP provides several work-sharing constructs to distribute work among threads. One of these // constructs is the \c sections directive: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization #pragma omp sections { #pragma omp section y1 = A * x; #pragma omp section y2 = B * x; } \endcode // In this example, two threads are used to compute two distinct matrix/vector multiplications // concurrently. Thereby each of the \c sections is executed by exactly one thread. // // Unfortunately \b Blaze does not support concurrent parallel computations and therefore this // approach does not work with any of the \b Blaze parallelization techniques. All techniques // (including the C++11 and Boost thread parallelizations; see \ref cpp_threads_parallelization // and \ref boost_threads_parallelization) are optimized for the parallel computation of an // operation within a single thread of execution. This means that \b Blaze tries to use all // available threads to compute the result of a single operation as efficiently as possible. // Therefore, for this special case, it is advisable to disable all \b Blaze parallelizations // and to let \b Blaze compute all operations within a \c sections directive in serial. 
This can // be done by either completely disabling the \b Blaze parallelization (see \ref serial_execution) // or by selectively serializing all operations within a \c sections directive via the \c serial() // function: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization #pragma omp sections { #pragma omp section y1 = serial( A * x ); #pragma omp section y2 = serial( B * x ); } \endcode // Please note that the use of the \c BLAZE_SERIAL_SECTION (see also \ref serial_execution) does // NOT work in this context! // // \n Previous: \ref boost_threads_parallelization &nbsp; &nbsp; Next: \ref serial_execution */ //************************************************************************************************* //**Serial Execution******************************************************************************* /*!\page serial_execution Serial Execution // // Sometimes it may be necessary to enforce the serial execution of specific operations. For this // purpose, the \b Blaze library offers three possible options: the serialization of a single // expression via the \c serial() function, the serialization of a block of expressions via the // \c BLAZE_SERIAL_SECTION, and the general deactivation of the parallel execution. // // // \n \section serial_execution_serial_expression Option 1: Serialization of a Single Expression // <hr> // // The first option is the serialization of a specific operation via the \c serial() function: \code blaze::DynamicMatrix<double> A, B, C; // ... Resizing and initialization C = serial( A + B ); \endcode // \c serial() enforces the serial evaluation of the enclosed expression. It can be used on any // kind of dense or sparse vector or matrix expression. 
// // // \n \section serial_execution_serial_section Option 2: Serialization of Multiple Expressions // <hr> // // The second option is the temporary and local enforcement of a serial execution via the // \c BLAZE_SERIAL_SECTION: \code using blaze::rowMajor; using blaze::columnVector; blaze::DynamicMatrix<double,rowMajor> A; blaze::DynamicVector<double,columnVector> b, c, d, x, y, z; // ... Resizing and initialization // Parallel execution // If possible and beneficial for performance the following operation is executed in parallel. x = A * b; // Serial execution // All operations executed within the serial section are guaranteed to be executed in // serial (even if a parallel execution would be possible and/or beneficial). BLAZE_SERIAL_SECTION { y = A * c; z = A * d; } // Parallel execution continued // ... \endcode // Within the scope of the \c BLAZE_SERIAL_SECTION, all operations are guaranteed to run in serial. // Outside the scope of the serial section, all operations are run in parallel (if beneficial for // the performance). // // Note that the \c BLAZE_SERIAL_SECTION must only be used within a single thread of execution. // The use of the serial section within several concurrent threads will result in undefined behavior! // // // \n \section serial_execution_deactivate_parallelism Option 3: Deactivation of Parallel Execution // <hr> // // The third option is the general deactivation of the parallel execution (even in case OpenMP is // enabled on the command line). This can be achieved via the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION // switch in the <tt>./blaze/config/SMP.h</tt> configuration file: \code #define BLAZE_USE_SHARED_MEMORY_PARALLELIZATION 1 \endcode // In case the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION switch is set to 0, the shared memory // parallelization is deactivated altogether. 
// // \n Previous: \ref openmp_parallelization &nbsp; &nbsp; Next: \ref serialization */ //************************************************************************************************* //**Serialization********************************************************************************** /*!\page serialization Serialization // // Sometimes it is necessary to store vectors and/or matrices on disk, for instance for storing // results or for sharing specific setups with other people. The \b Blaze math serialization // module provides the corresponding functionality to create platform independent, portable, binary // representations of vectors and matrices that can be used to store the \b Blaze data structures // without loss of precision and to reliably transfer them from one machine to another. // // The following two pages explain how to serialize vectors and matrices: // // - \ref vector_serialization // - \ref matrix_serialization // // \n Previous: \ref serial_execution &nbsp; &nbsp; Next: \ref vector_serialization */ //************************************************************************************************* //**Vector Serialization*************************************************************************** /*!\page vector_serialization Vector Serialization // // The following example demonstrates the (de-)serialization of dense and sparse vectors: \code using blaze::columnVector; using blaze::rowVector; // Serialization of both vectors { blaze::StaticVector<double,5UL,rowVector> d; blaze::CompressedVector<int,columnVector> s; // ... Resizing and initialization // Creating an archive that writes into the file "vectors.blaze" blaze::Archive<std::ofstream> archive( "vectors.blaze" ); // Serialization of both vectors into the same archive. Note that d lies before s!
archive << d << s; } // Reconstitution of both vectors { blaze::DynamicVector<double,rowVector> d1; blaze::DynamicVector<int,rowVector> d2; // Creating an archive that reads from the file "vectors.blaze" blaze::Archive<std::ifstream> archive( "vectors.blaze" ); // Reconstituting the former d vector into d1. Note that it is possible to reconstitute // the vector into a different kind of vector (StaticVector -> DynamicVector), but that // the type of elements has to be the same. archive >> d1; // Reconstituting the former s vector into d2. Note that it is even possible to reconstitute // a sparse vector as a dense vector (also the reverse is possible) and that a column vector // can be reconstituted as row vector (and vice versa). Note however that also in this case // the type of elements is the same! archive >> d2; } \endcode // The (de-)serialization of vectors is not restricted to vectors of built-in data type, but can // also be used for vectors with vector or matrix element type: \code // Serialization { blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec; // ... Resizing and initialization // Creating an archive that writes into the file "vector.blaze" blaze::Archive<std::ofstream> archive( "vector.blaze" ); // Serialization of the vector into the archive archive << vec; } // Deserialization { blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec; // Creating an archive that reads from the file "vector.blaze" blaze::Archive<std::ifstream> archive( "vector.blaze" ); // Reconstitution of the vector from the archive archive >> vec; } \endcode // As the example demonstrates, the vector serialization offers enormous flexibility.
However, // several actions result in errors: // // - vectors cannot be reconstituted as matrices (and vice versa) // - the element type of the serialized and reconstituted vector must match, which means // that on the source and destination platform the general type (signed/unsigned integral // or floating point) and the size of the type must be exactly the same // - when reconstituting a \c StaticVector, its size must match the size of the serialized vector // // In case an error is encountered during (de-)serialization, a \c std::runtime_error is // thrown. // // \n Previous: \ref serialization &nbsp; &nbsp; Next: \ref matrix_serialization */ //************************************************************************************************* //**Matrix Serialization*************************************************************************** /*!\page matrix_serialization Matrix Serialization // // The serialization of matrices works in the same manner as the serialization of vectors. The // following example demonstrates the (de-)serialization of dense and sparse matrices: \code using blaze::rowMajor; using blaze::columnMajor; // Serialization of both matrices { blaze::StaticMatrix<double,3UL,5UL,rowMajor> D; blaze::CompressedMatrix<int,columnMajor> S; // ... Resizing and initialization // Creating an archive that writes into the file "matrices.blaze" blaze::Archive<std::ofstream> archive( "matrices.blaze" ); // Serialization of both matrices into the same archive. Note that D lies before S! archive << D << S; } // Reconstitution of both matrices { blaze::DynamicMatrix<double,rowMajor> D1; blaze::DynamicMatrix<int,rowMajor> D2; // Creating an archive that reads from the file "matrices.blaze" blaze::Archive<std::ifstream> archive( "matrices.blaze" ); // Reconstituting the former D matrix into D1.
Note that it is possible to reconstitute // the matrix into a different kind of matrix (StaticMatrix -> DynamicMatrix), but that // the type of elements has to be the same. archive >> D1; // Reconstituting the former S matrix into D2. Note that it is even possible to reconstitute // a sparse matrix as a dense matrix (also the reverse is possible) and that a column-major // matrix can be reconstituted as row-major matrix (and vice versa). Note however that also // in this case the type of elements is the same! archive >> D2; } \endcode // Note that also in case of matrices it is possible to (de-)serialize matrices with vector or // matrix elements: \code // Serialization { blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat; // ... Resizing and initialization // Creating an archive that writes into the file "matrix.blaze" blaze::Archive<std::ofstream> archive( "matrix.blaze" ); // Serialization of the matrix into the archive archive << mat; } // Deserialization { blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat; // Creating an archive that reads from the file "matrix.blaze" blaze::Archive<std::ifstream> archive( "matrix.blaze" ); // Reconstitution of the matrix from the archive archive >> mat; } \endcode // Note that just as the vector serialization, the matrix serialization is restricted by a // few important rules: // // - matrices cannot be reconstituted as vectors (and vice versa) // - the element type of the serialized and reconstituted matrix must match, which means // that on the source and destination platform the general type (signed/unsigned integral // or floating point) and the size of the type must be exactly the same // - when reconstituting a \c StaticMatrix, the number of rows and columns must match those // of the serialized matrix // // In case an error is encountered during (de-)serialization, a \c std::runtime_error is // thrown.
// // \n Previous: \ref vector_serialization &nbsp; &nbsp; Next: \ref customization \n */ //************************************************************************************************* //**Customization********************************************************************************** /*!\page customization Customization // // Although \b Blaze tries to work out of the box for every possible setting, still it may be // necessary to adapt the library to specific requirements. The following four pages explain // how to customize the \b Blaze library to your own needs: // // - \ref configuration_files // - \ref vector_and_matrix_customization // - \ref grouping_tagging // - \ref error_reporting_customization // // \n Previous: \ref matrix_serialization &nbsp; &nbsp; Next: \ref configuration_files */ //************************************************************************************************* //**Configuration Files**************************************************************************** /*!\page configuration_files Configuration Files // // \tableofcontents // // // Sometimes it is necessary to adapt \b Blaze to specific requirements. For this purpose // \b Blaze provides several configuration files in the <tt>./blaze/config/</tt> subdirectory, // which provide ample opportunity to customize internal settings, behavior, and thresholds. // This chapter explains the most important of these configuration files. For a complete // overview of all customization opportunities, please go to the configuration files in the // <tt>./blaze/config/</tt> subdirectory or see the complete \b Blaze documentation.
// // // \n \section transpose_flag Default Vector Storage // <hr> // // The \b Blaze default is that all vectors are created as column vectors (if not specified // explicitly): \code blaze::StaticVector<double,3UL> x; // Creates a 3-dimensional static column vector \endcode // The header file <tt>./blaze/config/TransposeFlag.h</tt> allows the configuration of the default // vector storage (i.e. the default transpose flag) of all vectors within the \b Blaze library. // The default transpose flag is specified via the \c BLAZE_DEFAULT_TRANSPOSE_FLAG macro: \code #define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector \endcode // Alternatively the default transpose flag can be specified via command line or by defining // this symbol manually before including any \b Blaze header file: \code g++ ... -DBLAZE_DEFAULT_TRANSPOSE_FLAG=blaze::columnVector ... \endcode \code #define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector #include <blaze/Blaze.h> \endcode // Valid settings for \c BLAZE_DEFAULT_TRANSPOSE_FLAG are blaze::rowVector and blaze::columnVector. // // // \n \section storage_order Default Matrix Storage // <hr> // // Matrices are by default created as row-major matrices: \code blaze::StaticMatrix<double,3UL,3UL> A; // Creates a 3x3 row-major matrix \endcode // The header file <tt>./blaze/config/StorageOrder.h</tt> allows the configuration of the default // matrix storage order. Via the \c BLAZE_DEFAULT_STORAGE_ORDER macro the default storage order // for all matrices of the \b Blaze library can be specified. \code #define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor \endcode // Alternatively the default storage order can be specified via command line or by defining // this symbol manually before including any \b Blaze header file: \code g++ ... -DBLAZE_DEFAULT_STORAGE_ORDER=blaze::rowMajor ...
\endcode \code #define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor #include <blaze/Blaze.h> \endcode // Valid settings for \c BLAZE_DEFAULT_STORAGE_ORDER are blaze::rowMajor and blaze::columnMajor. // // // \n \section blas_mode BLAS Mode // <hr> // // In order to achieve maximum performance for multiplications with dense matrices, \b Blaze can // be configured to use a BLAS library. Via the following compilation switch in the configuration // file <tt>./blaze/config/BLAS.h</tt> BLAS can be enabled: \code #define BLAZE_BLAS_MODE 1 \endcode // By default, \b Blaze assumes a 32-bit BLAS library. Via the \c BLAZE_BLAS_IS_64BIT compilation // switch, the 64-bit BLAS mode can be selected: \code #define BLAZE_BLAS_IS_64BIT 1 \endcode // Note that the \c BLAZE_BLAS_IS_64BIT switch also has an effect on the \ref lapack_functions. // Please also note that it might additionally be necessary to use a compilation switch to put // the BLAS/LAPACK library into 64-bit mode (e.g. \c MKL_ILP64 for the Intel MKL library). // // In case the selected BLAS library provides parallel execution, the \c BLAZE_BLAS_IS_PARALLEL // switch should be activated to prevent \b Blaze from parallelizing on its own: \code #define BLAZE_BLAS_IS_PARALLEL 1 \endcode // Additionally, it is possible to specify the name of the BLAS include file via the // \c BLAZE_BLAS_INCLUDE_FILE switch. The default setting is <tt><cblas.h></tt>: \code #define BLAZE_BLAS_INCLUDE_FILE <cblas.h> \endcode // Alternatively, all settings can be specified via command line or by defining the symbols // manually before including any \b Blaze header file: \code g++ ... -DBLAZE_BLAS_MODE=1 -DBLAZE_BLAS_IS_64BIT=1 -DBLAZE_BLAS_IS_PARALLEL=1 -DBLAZE_BLAS_INCLUDE_FILE='<cblas.h>' ...
\endcode \code #define BLAZE_BLAS_MODE 1 #define BLAZE_BLAS_IS_64BIT 1 #define BLAZE_BLAS_IS_PARALLEL 1 #define BLAZE_BLAS_INCLUDE_FILE <cblas.h> #include <blaze/Blaze.h> \endcode // In case no BLAS library is available, \b Blaze will still work and will not be reduced in // functionality, but performance may be limited. // // // \n \section cache_size Cache Size // <hr> // // The optimization of several \b Blaze compute kernels depends on the cache size of the target // architecture. By default, \b Blaze assumes a cache size of 3 MiByte. However, for optimal // speed the exact cache size of the system should be provided via the \c BLAZE_CACHE_SIZE value in the // <tt>./blaze/config/CacheSize.h</tt> configuration file: \code #define BLAZE_CACHE_SIZE 3145728UL \endcode // The cache size can also be specified via command line or by defining this symbol manually // before including any \b Blaze header file: \code g++ ... -DBLAZE_CACHE_SIZE=3145728 ... \endcode \code #define BLAZE_CACHE_SIZE 3145728 #include <blaze/Blaze.h> \endcode // \n \section vectorization Vectorization // <hr> // // In order to achieve maximum performance and to exploit the compute power of a target platform // the \b Blaze library attempts to vectorize all linear algebra operations by SSE, AVX, and/or // AVX-512 intrinsics, depending on which instruction set is available. However, it is possible // to disable the vectorization entirely by the compile time switch in the configuration file // <tt>./blaze/config/Vectorization.h</tt>: \code #define BLAZE_USE_VECTORIZATION 1 \endcode // It is also possible to (de-)activate vectorization via command line or by defining this symbol // manually before including any \b Blaze header file: \code g++ ... -DBLAZE_USE_VECTORIZATION=1 ... \endcode \code #define BLAZE_USE_VECTORIZATION 1 #include <blaze/Blaze.h> \endcode // In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed // to use intrinsics to speed up computations.
In case the switch is set to 0, vectorization is // disabled entirely and the \b Blaze library chooses default, non-vectorized functionality for // the operations. Note that deactivating the vectorization may pose a severe performance // limitation for a large number of operations! // // // \n \section sleef Sleef // <hr> // // For several complex operations \b Blaze can make use of the Sleef library for vectorization // (https://github.com/shibatch/sleef). This compilation switch enables/disables the vectorization // by means of Sleef. In case the switch is set to 1, \b Blaze uses Sleef for instance for the // vectorized computation of trigonometric functions (i.e. \c sin(), \c cos(), \c tan(), etc.) // and exponential functions (i.e. \c exp(), \c log(), ...). \code #define BLAZE_USE_SLEEF 1 \endcode // It is also possible to enable/disable Sleef vectorization via command line or by defining this // symbol manually before including any \b Blaze header file: \code g++ ... -DBLAZE_USE_SLEEF=1 ... \endcode \code #define BLAZE_USE_SLEEF 1 #include <blaze/Blaze.h> \endcode // \n \section thresholds Thresholds // <hr> // // For many computations \b Blaze distinguishes between small and large vectors and matrices. // This separation is especially important for the parallel execution of computations, since // the use of several threads only pays off for sufficiently large vectors and matrices. // Additionally, it also enables \b Blaze to select kernels that are optimized for a specific // size. // // In order to distinguish between small and large data structures \b Blaze provides several // thresholds that can be adapted to the characteristics of the target platform. For instance, // the \c DMATDVECMULT_THRESHOLD specifies the threshold between the application of the custom // \b Blaze kernels for small dense matrix/dense vector multiplications and the BLAS kernels // for large multiplications. 
All thresholds, including the thresholds for the OpenMP- and // thread-based parallelization, are contained within the configuration file // <tt><blaze/config/Thresholds.h></tt>. // // // \n \section alignment Alignment // <hr> // // For performance reasons, the vector types \ref vector_types_static_vector and // \ref vector_types_hybrid_vector and the matrix types \ref matrix_types_static_matrix and // \ref matrix_types_hybrid_matrix by default make use of aligned memory. Via the configuration // file <tt>./blaze/config/Alignment.h</tt> it is possible to define the default alignment flag: \code #define BLAZE_DEFAULT_ALIGNMENT_FLAG blaze::aligned \endcode // Alternatively it is possible to set the default alignment flag via command line or by defining // this symbol manually before including any \b Blaze header file: \code g++ ... -DBLAZE_DEFAULT_ALIGNMENT_FLAG=blaze::aligned ... \endcode \code #define BLAZE_DEFAULT_ALIGNMENT_FLAG blaze::aligned #include <blaze/Blaze.h> \endcode // If \c BLAZE_DEFAULT_ALIGNMENT_FLAG is set to \c blaze::aligned then \ref vector_types_static_vector, // \ref vector_types_hybrid_vector, \ref matrix_types_static_matrix, and \ref matrix_types_hybrid_matrix // use aligned memory by default. If it is set to \c blaze::unaligned they don't enforce aligned // memory. Note however that disabling alignment can considerably reduce the performance of all // operations with these vector and matrix types! // // // \n \section padding Padding // <hr> // // By default the \b Blaze library uses padding for the vector types \ref vector_types_static_vector // and \ref vector_types_hybrid_vector and the matrix types \ref matrix_types_static_matrix and // \ref matrix_types_hybrid_matrix in order to achieve maximum performance in all operations. Due // to padding, the proper alignment of data elements can be guaranteed and the need for remainder // loops is minimized.
However, on the downside padding introduces an additional memory overhead, // which can be large depending on the used data type. // // The configuration file <tt>./blaze/config/Padding.h</tt> provides a compile time switch that // can be used to define the default padding flag: \code #define BLAZE_DEFAULT_PADDING_FLAG blaze::padded \endcode // Alternatively it is possible to define the default padding flag via command line or by defining // this symbol manually before including any \b Blaze header file: \code g++ ... -DBLAZE_DEFAULT_PADDING_FLAG=blaze::padded ... \endcode \code #define BLAZE_DEFAULT_PADDING_FLAG blaze::padded #include <blaze/Blaze.h> \endcode // If \c BLAZE_DEFAULT_PADDING_FLAG is set to \c blaze::padded, by default padding is enabled // for \ref vector_types_static_vector, \ref vector_types_hybrid_vector, \ref matrix_types_static_matrix // and \ref matrix_types_hybrid_matrix. If it is set to \c blaze::unpadded, then padding is by // default disabled. Note however that disabling padding can considerably reduce the performance // of all dense vector and matrix operations! // // // \n \section streaming Streaming (Non-Temporal Stores) // <hr> // // For vectors and matrices that don't fit into the cache anymore non-temporal stores can provide // a significant performance advantage of about 20%. However, this advantage is only in effect in // case the memory bandwidth of the target architecture is maxed out. If the target architecture's // memory bandwidth cannot be exhausted the use of non-temporal stores can decrease performance // instead of increasing it. // // The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // that can be used to (de-)activate streaming: \code #define BLAZE_USE_STREAMING 1 \endcode // Alternatively streaming can be (de-)activated via command line or by defining this symbol // manually before including any \b Blaze header file: \code g++ ... -DBLAZE_USE_STREAMING=1 ...
\endcode \code #define BLAZE_USE_STREAMING 1 #include <blaze/Blaze.h> \endcode // If \c BLAZE_USE_STREAMING is set to 1 streaming is enabled, if it is set to 0 streaming is // disabled. It is recommended to consult the target architecture's white papers to decide whether // streaming is beneficial or hurtful for performance. // // // \n Previous: \ref customization &nbsp; &nbsp; Next: \ref vector_and_matrix_customization \n */ //************************************************************************************************* //**Customization of Vectors and Matrices********************************************************** /*!\page vector_and_matrix_customization Customization of Vectors and Matrices // // \tableofcontents // // // \n \section custom_data_members Custom Data Members // <hr> // // So far the \b Blaze library does not provide a lot of flexibility to customize the data // members of existing \ref vector_types and \ref matrix_types. However, to some extend it is // possible to customize vectors and matrices by inheritance. The following example gives an // impression on how to create a simple variation of \ref matrix_types_custom_matrix, which // automatically takes care of acquiring and releasing custom memory. \code template< typename Type // Data type of the matrix , bool SO = defaultStorageOrder > // Storage order class MyCustomMatrix : public CustomMatrix< Type, unaligned, unpadded, SO > { public: explicit inline MyCustomMatrix( size_t m, size_t n ) : CustomMatrix<Type,unaligned,unpadded,SO>() , array_( new Type[m*n] ) { this->reset( array_.get(), m, n ); } private: std::unique_ptr<Type[]> array_; }; \endcode // Please note that this is a simplified example with the intent to show the general approach. // The number of constructors, the memory acquisition, and the kind of memory management can of // course be adapted to specific requirements. 
Also, please note that since none of the \b Blaze // vectors and matrices have virtual destructors polymorphic destruction cannot be used. // // // \n \section custom_operations Custom Operations // <hr> // // There are two approaches to extend \b Blaze with custom operations. First, the \c map() // functions provide the possibility to execute componentwise custom operations on vectors and // matrices. Second, it is possible to add customized free functions. // // \n \subsection custom_operations_map The map() Functions // // Via the unary and binary \c map() functions it is possible to execute componentwise custom // operations on vectors and matrices. The unary \c map() function can be used to apply a custom // operation on each single element of a dense vector or matrix or each non-zero element of a // sparse vector or matrix. For instance, the following example demonstrates a custom square // root computation on a dense matrix: \code blaze::DynamicMatrix<double> A, B; B = map( A, []( double d ) { return std::sqrt( d ); } ); \endcode // The binary \c map() function can be used to apply an operation pairwise to the elements of // two dense vectors or two dense matrices. The following example demonstrates the merging of // two matrices of double precision values into a matrix of double precision complex numbers: \code blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } }; blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } }; blaze::DynamicMatrix< complex<double> > cplx; // Creating the matrix // ( ( 2.1, 0.3) (-4.2, 1.4) ) // ( ( 1.0, 2.9) ( 0.6, -3.4) ) cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } ); \endcode // These examples demonstrate the most convenient way of defining a unary custom operation by // passing a lambda to the \c map() function. 
Alternatively, it is possible to pass a custom // functor: \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } }; B = map( A, Sqrt() ); \endcode // In order for the functor to work in a call to \c map() it must define a function call operator, // which accepts arguments of the type of the according vector or matrix elements. // // Although the operation is automatically parallelized depending on the size of the vector or // matrix, no automatic vectorization is possible. In order to enable vectorization, a \c load() // function can be added to the functor, which handles the vectorized computation. Depending on // the data type this function is passed one of the following \b Blaze SIMD data types: // // <ul> // <li>SIMD data types for fundamental data types // <ul> // <li>\c blaze::SIMDint8: Packed SIMD type for 8-bit signed integral data types</li> // <li>\c blaze::SIMDuint8: Packed SIMD type for 8-bit unsigned integral data types</li> // <li>\c blaze::SIMDint16: Packed SIMD type for 16-bit signed integral data types</li> // <li>\c blaze::SIMDuint16: Packed SIMD type for 16-bit unsigned integral data types</li> // <li>\c blaze::SIMDint32: Packed SIMD type for 32-bit signed integral data types</li> // <li>\c blaze::SIMDuint32: Packed SIMD type for 32-bit unsigned integral data types</li> // <li>\c blaze::SIMDint64: Packed SIMD type for 64-bit signed integral data types</li> // <li>\c blaze::SIMDuint64: Packed SIMD type for 64-bit unsigned integral data types</li> // <li>\c blaze::SIMDfloat: Packed SIMD type for single precision floating point data</li> // <li>\c blaze::SIMDdouble: Packed SIMD type for double precision floating point data</li> // </ul> // </li> // <li>SIMD data types for complex data types // <ul> // <li>\c blaze::SIMDcint8: Packed SIMD type for complex 8-bit signed integral data types</li> // <li>\c blaze::SIMDcuint8: Packed SIMD type for complex 8-bit unsigned integral data types</li> // <li>\c blaze::SIMDcint16: 
Packed SIMD type for complex 16-bit signed integral data types</li> // <li>\c blaze::SIMDcuint16: Packed SIMD type for complex 16-bit unsigned integral data types</li> // <li>\c blaze::SIMDcint32: Packed SIMD type for complex 32-bit signed integral data types</li> // <li>\c blaze::SIMDcuint32: Packed SIMD type for complex 32-bit unsigned integral data types</li> // <li>\c blaze::SIMDcint64: Packed SIMD type for complex 64-bit signed integral data types</li> // <li>\c blaze::SIMDcuint64: Packed SIMD type for complex 64-bit unsigned integral data types</li> // <li>\c blaze::SIMDcfloat: Packed SIMD type for complex single precision floating point data</li> // <li>\c blaze::SIMDcdouble: Packed SIMD type for complex double precision floating point data</li> // </ul> // </li> // </ul> // // All SIMD types provide the \c value data member for a direct access to the underlying intrinsic // data element. In the following example, this intrinsic element is passed to the AVX function // \c _mm256_sqrt_pd(): \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } SIMDdouble load( const SIMDdouble& a ) const { return _mm256_sqrt_pd( a.value ); } }; \endcode // In this example, whenever vectorization is generally applicable, the \c load() function is // called instead of the function call operator for as long as the number of remaining elements // is larger-or-equal to the width of the packed SIMD type. In all other cases (which also // includes peel-off and remainder loops) the scalar operation is used. // // Please note that this example has two drawbacks: First, it will only compile in case the // intrinsic \c _mm256_sqrt_pd() function is available (i.e. when AVX is active). Second, the // availability of AVX is not taken into account. The first drawback can be alleviated by making // the \c load() function a function template. 
The second drawback can be dealt with by adding a // \c simdEnabled() function template to the functor: \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } template< typename T > T load( const T& a ) const { return _mm256_sqrt_pd( a.value ); } template< typename T > static constexpr bool simdEnabled() { #if defined(__AVX__) return true; #else return false; #endif } }; \endcode // The \c simdEnabled() function must be a \c static, \c constexpr function and must return whether // or not vectorization is available for the given data type \c T. In case the function returns // \c true, the \c load() function is used for a vectorized evaluation, in case the function // returns \c false, \c load() is neither called nor instantiated. // // By default the \c map() function uses peel-off and remainder loops if the number of elements is // not a multiple of the width of the packed SIMD type. However, all dense vector and matrix types // in \b Blaze provide padding as an optimization. In case the custom operation preserves the // value zero of the padding elements, it is possible to omit the peel-off and remainder loops, // include the padding elements in the computation and by that increase performance. For that // purpose the \c paddingEnabled() function can be added to the functor: \code struct Sqrt { // ... static constexpr bool paddingEnabled() { return true; } }; \endcode // Also the \c paddingEnabled() function must be a \c static, \c constexpr function and must // return whether padding elements can be used in the custom operation. In case the function // returns \c true, the padding elements are used during a vectorized operation, in case the // function returns \c false, the padding elements are not used. // // Note that this is a simplified example that is only working when used for dense vectors and // matrices with double precision floating point elements. 
The following code shows the complete // implementation of the according functor that is used within the \b Blaze library. The \b Blaze // \c Sqrt functor is working for all data types that are providing a square root operation: \code namespace blaze { struct Sqrt { template< typename T > BLAZE_ALWAYS_INLINE auto operator()( const T& a ) const { return sqrt( a ); } template< typename T > static constexpr bool simdEnabled() { return HasSIMDSqrt<T>::value; } static constexpr bool paddingEnabled() { return true; } template< typename T > BLAZE_ALWAYS_INLINE auto load( const T& a ) const { BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T ); return sqrt( a ); } }; } // namespace blaze \endcode // The same approach can be taken for binary custom operations. The following code demonstrates // the \c Min functor of the \b Blaze library, which is working for all data types that provide // a \c min() operation: \code struct Min { explicit inline Min() {} template< typename T1, typename T2 > BLAZE_ALWAYS_INLINE decltype(auto) operator()( const T1& a, const T2& b ) const { return min( a, b ); } template< typename T1, typename T2 > static constexpr bool simdEnabled() { return HasSIMDMin<T1,T2>::value; } static constexpr bool paddingEnabled() { return true; } template< typename T1, typename T2 > BLAZE_ALWAYS_INLINE decltype(auto) load( const T1& a, const T2& b ) const { BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T1 ); BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T2 ); return min( a, b ); } }; \endcode // For more information on the available \b Blaze SIMD data types and functions, please see the // SIMD module in the complete \b Blaze documentation. // // \n \subsection custom_operations_free_functions Free Functions // // In order to extend \b Blaze with new functionality it is possible to add free functions. Free // functions can be used either as wrappers around calls to the map() function or to implement // general, non-componentwise operations. The following two examples will demonstrate both ideas. 
// // The first example shows the \c setToZero() function, which resets a sparse matrix to zero // without affecting the sparsity pattern. It is implemented as a convenience wrapper around // the map() function: \code template< typename MT // Type of the sparse matrix , bool SO > // Storage order void setToZero( blaze::SparseMatrix<MT,SO>& mat ) { (~mat) = blaze::map( ~mat, []( const auto& value ){ return decltype(value){}; } ); } \endcode // The blaze::SparseMatrix class template is the base class for all kinds of sparse matrices and // provides an abstraction from the actual type \c MT of the sparse matrix. However, due to the // <a href="https://en.wikipedia.org/wiki/Curiously_recurring_template_pattern">Curiously Recurring Template Pattern (CRTP)</a> // it also enables a conversion back to the actual type. This downcast is performed via the tilde // operator (i.e. \c operator~()). The template parameter \c SO represents the storage order // (blaze::rowMajor or blaze::columnMajor) of the matrix. // // The second example shows the \c countZeros() function, which counts the number of values, which // are exactly zero, in a dense, row-major matrix: \code template< typename MT > size_t countZeros( blaze::DenseMatrix<MT,rowMajor>& mat ) { const size_t M( (~mat).rows() ); const size_t N( (~mat).columns() ); size_t count( 0UL ); for( size_t i=0UL; i<M; ++i ) { for( size_t j=0UL; j<N; ++j ) { if( blaze::isDefault<strict>( (~mat)(i,j) ) ) ++count; } } return count; } \endcode // The blaze::DenseMatrix class template is the base class for all kinds of dense matrices. Again, // it is possible to perform the conversion to the actual type via the tilde operator. 
// // The following two listings show the declarations of all vector and matrix base classes, which // can be used for custom free functions: \code template< typename VT // Concrete type of the dense or sparse vector , bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector) class Vector; template< typename VT // Concrete type of the dense vector , bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector) class DenseVector; template< typename VT // Concrete type of the sparse vector , bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector) class SparseVector; \endcode \code template< typename MT // Concrete type of the dense or sparse matrix , bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor) class Matrix; template< typename MT // Concrete type of the dense matrix , bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor) class DenseMatrix; template< typename MT // Concrete type of the sparse matrix , bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor) class SparseMatrix; \endcode // \n \section custom_data_types Custom Data Types // <hr> // // \subsection custom_data_types_introduction Introduction // // The \b Blaze library is not restricted to integral, floating point and complex data types // (called numeric types in \b Blaze), but it supports custom data types. 
For instance, the // following example demonstrates that it is possible to use \c std::string as data type: \code blaze::DynamicVector<std::string> a{ "Hello, ", "Blaze " , "Expression" }; blaze::DynamicVector<std::string> b{ "World" , "Library", " Templates" }; const auto c( evaluate( a + b ) ); std::cout << "c =\n" << c << "\n\n"; const std::string maxString( max( c ) ); std::cout << "maxString = " << std::quoted(maxString) << "\n"; \endcode // Output: \code c = ( Hello, World ) ( Blaze Library ) ( Expression Templates ) maxString = "Hello, World" \endcode // \b Blaze tries hard to make the use of custom data types as convenient, easy and intuitive as // possible. In order to work flawlessly with \b Blaze, custom data types are required to provide // a certain interface (depending on the operations that the type is used for). The following // sections give an overview of the necessary steps to enable the use of the hypothetical custom // data type \c custom::double_t for vector and matrix operations. 
\code namespace custom { struct double_t { constexpr double_t() = default; constexpr double_t( double i ) : value( i ) {} double value{}; }; } // namespace custom \endcode // \subsection custom_data_types_arithmetic_operations Arithmetic Operations // // The \b Blaze library assumes that a custom data type provides \c operator<<() for streaming, // \c operator+=() and \c operator+() for additions (which for instance includes additions inside // matrix/vector multiplications, matrix/matrix multiplications, reduction or norm operations), // \c operator-=() and \c operator-() for subtractions, \c operator*=() and \c operator*() for // multiplications and \c operator/=() and \c operator/() for divisions: \code namespace custom { constexpr double_t& operator+=( double_t& lhs, double_t rhs ) noexcept { lhs.value += rhs.value; return lhs; } constexpr double_t& operator-=( double_t& lhs, double_t rhs ) noexcept { lhs.value -= rhs.value; return lhs; } constexpr double_t& operator*=( double_t& lhs, double_t rhs ) noexcept { lhs.value *= rhs.value; return lhs; } constexpr double_t& operator/=( double_t& lhs, double_t rhs ) noexcept { lhs.value /= rhs.value; return lhs; } constexpr double_t operator+( double_t lhs, double_t rhs ) noexcept { return double_t{ lhs.value + rhs.value }; } constexpr double_t operator-( double_t lhs, double_t rhs ) noexcept { return double_t{ lhs.value - rhs.value }; } constexpr double_t operator*( double_t lhs, double_t rhs ) noexcept { return double_t{ lhs.value * rhs.value }; } constexpr double_t operator/( double_t lhs, double_t rhs ) noexcept { return double_t{ lhs.value / rhs.value }; } inline std::ostream& operator<<( std::ostream& os, double_t d ) { return os << d.value; } } // namespace custom \endcode // Example: \code int main() { blaze::DynamicVector<custom::double_t> a{ 1.0, 2.0, 3.0, 4.0 }; blaze::DynamicVector<custom::double_t> b{ 0.1, 0.2, 0.3, 0.4 }; std::cout << "a + b =\n" << ( a + b ) << "\n"; std::cout << "a * b =\n" << ( a * b ) 
<< "\n"; std::cout << "sum(a) = " << sum(a) << "\n" << "prod(a) = " << prod(a) << "\n"; } \endcode // Output: \code a + b = ( 1.1 ) ( 2.2 ) ( 3.3 ) ( 4.4 ) a * b = ( 0.1 ) ( 0.4 ) ( 0.9 ) ( 1.6 ) sum(a) = 10 prod(a) = 24 \endcode // Note that similar steps are necessary if several custom data types are combined (as for instance // \c custom::double_t and \c custom::float_t). Note that in this case both permutations need to // be taken into account: \code custom::double_t operator+( const custom::double_t& a, const custom::float_t& b ); custom::double_t operator+( const custom::float_t& a, const custom::double_t& b ); // ... \endcode // Please note that only built-in data types apply for vectorization and thus custom data types // cannot achieve maximum performance! // // \subsection custom_data_types_relational_operations Relational Operations // // In order to compare the element type, \b Blaze expects the equality operator (i.e. \c operator==()) // and the inequality operator (i.e. \c operator!=()). 
Alternatively it is possible to provide an // \c equal() function, which distinguishes between strict and relaxed comparison: \code namespace custom { constexpr bool operator==( double_t lhs, double_t rhs ) noexcept { return lhs.value == rhs.value; } constexpr bool operator!=( double_t lhs, double_t rhs ) noexcept { return !( lhs == rhs ); } template< blaze::RelaxationFlag RF > constexpr bool equal( double_t lhs, double_t rhs ) noexcept { return blaze::equal<RF>( lhs.value, rhs.value ); } } // namespace custom \endcode // Example: \code int main() { blaze::DynamicVector<custom::double_t> a{ 1.0, 2.0, 3.0, 4.0 }; blaze::DynamicVector<custom::double_t> b{ 0.1, 0.2, 0.3, 0.4 }; std::cout << "a == b: " << ( a == b ) << "\n" << "a != b: " << ( a != b ) << "\n"; } \endcode // Output: \code a == b: 0 a != b: 1 \endcode // \subsection custom_data_types_elementwise_operations Elementwise Operations // // For the different kinds of elementwise operations on vectors and matrices (\c abs(), \c sin(), // \c cos(), \c sqrt(), \c log(), \c exp(), \c min(), \c max(), ...), the custom type is required // to provide the according function overload. Note that the \c sqrt() operation may also be // required for several norm computations. 
Also, for any inversion operation, the type is required // to support the \c inv() function: \code namespace custom { inline double_t abs ( double_t d ) noexcept { return double_t{ std::abs ( d.value ) }; } inline double_t sin ( double_t d ) noexcept { return double_t{ std::sin ( d.value ) }; } inline double_t cos ( double_t d ) noexcept { return double_t{ std::cos ( d.value ) }; } inline double_t sqrt( double_t d ) noexcept { return double_t{ std::sqrt( d.value ) }; } inline double_t log ( double_t d ) noexcept { return double_t{ std::log ( d.value ) }; } inline double_t exp ( double_t d ) noexcept { return double_t{ std::exp ( d.value ) }; } constexpr double_t inv ( double_t d ) noexcept { return double_t{ 1.0/d.value }; } constexpr double_t min( double_t lhs, double_t rhs ) noexcept { return double_t{ blaze::min( lhs.value, rhs.value ) }; } constexpr double_t max( double_t lhs, double_t rhs ) noexcept { return double_t{ blaze::max( lhs.value, rhs.value ) }; } } // namespace custom \endcode // Example: \code int main() { blaze::DynamicVector<custom::double_t> a{ 1.0, 2.0, 3.0, 4.0 }; blaze::DynamicVector<custom::double_t> b{ 0.1, 0.2, 0.3, 0.4 }; std::cout << "abs(a) =\n" << abs(a) << "\n"; std::cout << "sin(a) =\n" << sin(a) << "\n"; std::cout << "cos(a) =\n" << cos(a) << "\n"; std::cout << "sqrt(a) =\n" << sqrt(a) << "\n"; std::cout << "log(a) =\n" << log(a) << "\n"; std::cout << "exp(a) =\n" << exp(a) << "\n\n"; std::cout << "min(a) =\n" << min(a) << "\n"; std::cout << "max(a) =\n" << max(a) << "\n\n"; std::cout << "min(a,b) =\n" << min(a,b) << "\n"; std::cout << "max(a,b) =\n" << max(a,b) << "\n"; std::cout << "norm(a) = " << norm(a) << "\n"; } \endcode // Output: \code abs(a) = ( 1 ) ( 2 ) ( 3 ) ( 4 ) sin(a) = ( 0.841471 ) ( 0.909297 ) ( 0.14112 ) ( -0.756802 ) cos(a) = ( 0.540302 ) ( -0.416147 ) ( -0.989992 ) ( -0.653644 ) sqrt(a) = ( 1 ) ( 1.41421 ) ( 1.73205 ) ( 2 ) log(a) = ( 0 ) ( 0.693147 ) ( 1.09861 ) ( 1.38629 ) exp(a) = ( 2.71828 ) ( 7.38906 ) ( 
20.0855 ) ( 54.5982 ) min(a) = 1 max(a) = 4 min(a,b) = ( 0.1 ) ( 0.2 ) ( 0.3 ) ( 0.4 ) max(a,b) = ( 1 ) ( 2 ) ( 3 ) ( 4 ) norm(a) = 5.47723 \endcode // \subsection custom_data_types_adaptors Adaptors // // If the custom data type is used in the context of the HermitianMatrix, UniLowerMatrix, or // UniUpperMatrix adaptors, it will be necessary to provide overloads of the \c isZero(), // \c isOne(), and \c isReal() functions: \code namespace custom { template< blaze::RelaxationFlag RF > constexpr bool isZero( double_t d ) { return blaze::isZero<RF>( d.value ); } template< blaze::RelaxationFlag RF > constexpr bool isOne ( double_t d ) { return blaze::isOne<RF> ( d.value ); } template< blaze::RelaxationFlag RF > constexpr bool isReal( double_t d ) { MAYBE_UNUSED( d ); return true; } } // namespace custom \endcode // Example: \code int main() { blaze::UniLowerMatrix< blaze::DynamicMatrix<custom::double_t> > L { { 1.0, 0.0, 0.0 }, { 2.0, 1.0, 0.0 }, { 3.0, 4.0, 1.0 } }; blaze::UniUpperMatrix< blaze::DynamicMatrix<custom::double_t> > U { { 1.0, 2.0, 3.0 }, { 0.0, 1.0, 4.0 }, { 0.0, 0.0, 1.0 } }; const auto A( evaluate( L * U ) ); std::cout << "A =\n" << A << "\n"; } \endcode // Output: \code A = ( 1 2 3 ) ( 2 5 10 ) ( 3 10 26 ) \endcode // \n Previous: \ref configuration_files &nbsp; &nbsp; Next: \ref grouping_tagging \n */ //************************************************************************************************* //**Grouping/Tagging******************************************************************************* /*!\page grouping_tagging Grouping/Tagging // // \tableofcontents // // // \n \section grouping_tagging_tagging_and_groups Tagging and Groups // <hr> // // Sometimes it may be desirable to separate two or more distinct groups of vectors and matrices, // for instance in order to allow operations only within a group and to prevent operations across // groups. This goal can be achieved by means of tags. 
All vector and matrix classes provide a // template parameter to specify a tag (for instance, the fourth template parameter for // blaze::DynamicVector and the sixth template parameter for blaze::StaticVector): \code template< typename Type, bool TF, typename Alloc, typename Tag > class DynamicVector; template< typename Type, size_t N, bool TF, AlignmentFlag AF, PaddingFlag PF, typename Tag > class StaticVector; \endcode // By default, all vectors and matrices are associated with blaze::Group0 (i.e. the tag is set // to blaze::Group0). However, it is possible to explicitly associate vectors and matrices with // different groups: \code using blaze::DynamicVector; using blaze::AlignedAllocator; using blaze::Group0; using blaze::Group1; using blaze::columnVector; DynamicVector<int,columnVector,AlignedAllocator<int>,Group0> a0, b0; DynamicVector<int,columnVector,AlignedAllocator<int>,Group1> a1, b1; a0 + b0; // Compiles, a0 and b0 are in the same group (Group0) a1 + b1; // Compiles, a1 and b1 are in the same group (Group1) a0 + b1; // Compilation error: a0 and b1 are not in the same group \endcode // All vectors or matrices that are associated with the same group can be freely combined with any // other vector or matrix from the same group. The attempt to combine vectors and matrices from // different groups results in a compilation error. // // // \n \section grouping_tagging_creating_new_groups Creating New Groups // <hr> // // \b Blaze provides the tags for the ten predefined groups blaze::Group0 through blaze::Group9. // In order to create further groups, all that needs to be done is to create new instances of the // blaze::GroupTag class template: \code using Group10 = blaze::GroupTag<10>; using Group11 = blaze::GroupTag<11>; // ... further groups \endcode // All groups based on the blaze::GroupTag class template will be treated as separate groups just // as the ten predefined groups. 
// // // \n \section grouping_tagging_custom_tags Custom Tags // <hr> // // Sometimes it is not enough to separate vectors and matrices into different groups, but it is // required to define the interaction between different groups. This situation for instance occurs // if a vector or matrix is associated with a physical quantity. This problem can be solved by // using custom tags. The following example gives an impression on how to define the physics on // meters (represented by the \c Meter tag) and seconds (represented by the \c Second tag): \code struct Meter {}; // Definition of the 'Meter' tag struct Second {}; // Definition of the 'Second' tag struct SquareMeter {}; // Definition of the 'SquareMeter' tag struct MeterPerSecond {}; // Definition of the 'MeterPerSecond' tag \endcode // The \c Meter and \c Second tags are not associated with the blaze::GroupTag class template. For // that reason, by default, it is not possible to perform any operation on an accordingly tagged // vector or matrix. All required operations need to be declared explicitly in order to specify // the resulting tag of an operation. In the following code example, this happens by declaring // both the addition for the \c Meter tag and the \c Second tag, the multiplication between two // \c Meter tags and the division between \c Meter and \c Second. Note that it is enough to // declare the operations, it is not necessary to define them! \code Meter operator+( Meter , Meter ); // Enabling addition between 'Meter' Second operator+( Second, Second ); // Enabling addition between 'Second' SquareMeter operator*( Meter , Meter ); // Enabling multiplication between 'Meter' MeterPerSecond operator/( Meter , Second ); // Enabling division between 'Meter' and 'Second' \endcode // With these declarations it is now possible to add meters and seconds, but not to subtract them // (no subtraction operator was declared). 
Also, it is possible to multiply meters and to divide // meters and seconds: \code const DynamicVector<int,rowVector,AlignedAllocator<int>,Meter> m1{ 1, 2, 3 }; const DynamicVector<int,rowVector,AlignedAllocator<int>,Meter> m2{ 4, 5, 6 }; const DynamicVector<int,rowVector,AlignedAllocator<int>,Second> s1{ 1, 2, 3 }; const DynamicVector<int,rowVector,AlignedAllocator<int>,Second> s2{ 4, 5, 6 }; m1 + m2; // Compiles and results in vector tagged with 'Meter' s1 + s2; // Compiles and results in vector tagged with 'Second' m1 - m2; // Compilation error: No subtraction defined for 'Meter'! m1 + s2; // Compilation error: No addition between 'Meter' and 'Second' defined! m1 * m2; // Compiles and results in vector tagged with 'SquareMeter' m1 / s1; // Compiles and results in vector tagged with 'MeterPerSecond' \endcode // At this point it is possible to use the \c pow2() function for vectors and matrices tagged with // \c Meter since \c pow2() is based on multiplication, which has already been declared. 
However, // it is not possible to use the \c abs() function: \code pow2( m1 ); // Compiles and results in vector tagged with 'SquareMeter' abs ( m1 ); // Compilation error: No 'abs()' declared for the 'Meter' tag \endcode // In order to enable the \c abs() function it also needs to be explicitly declared for the // \c Meter tag: \code Meter abs( Meter ); // Enabling the 'abs()' function on 'Meter' abs ( m1 ); // Compiles and results in vector tagged with 'Meter' \endcode // \n Previous: \ref vector_and_matrix_customization &nbsp; &nbsp; Next: \ref error_reporting_customization \n */ //************************************************************************************************* //**Customization of the Error Reporting Mechanism************************************************* /*!\page error_reporting_customization Customization of the Error Reporting Mechanism // // \tableofcontents // // // \n \section error_reporting_background Background // <hr> // // The default way of \b Blaze to report errors of any kind is to throw a standard exception. // However, although in general this approach works well, in certain environments and under // special circumstances exceptions may not be the mechanism of choice and a different error // reporting mechanism may be desirable. For this reason, \b Blaze provides several macros, // which enable the customization of the error reporting mechanism. Via these macros it is // possible to replace the standard exceptions by some other exception type or a completely // different approach to report errors. // // // \n \section error_reporting_general_customization Customization of the Reporting Mechanism // <hr> // // In some cases it might be necessary to adapt the entire error reporting mechanism and to // replace it by some other means to signal failure. 
The primary macro for this purpose is the // \c BLAZE_THROW macro: \code #define BLAZE_THROW( EXCEPTION ) \ throw EXCEPTION \endcode // This macro represents the default mechanism of the \b Blaze library to report errors of any // kind. In order to customize the error reporting mechanism all that needs to be done is to // define the macro prior to including any \b Blaze header file. This will cause the \b Blaze // specific mechanism to be overridden. The following example demonstrates this by replacing // exceptions by a call to a \c log() function and a direct call to abort: \code #define BLAZE_THROW( EXCEPTION ) \ log( "..." ); \ abort() #include <blaze/Blaze.h> \endcode // Doing this will trigger a call to \c log() and an abort instead of throwing an exception // whenever an error (such as an invalid argument) is detected. // // \note It is possible to execute several statements instead of executing a single statement to // throw an exception. Also note that it is recommended to define the macro such that a subsequent // semicolon is required! // // \warning This macro is provided with the intention to assist in adapting \b Blaze to special // conditions and environments. However, the customization of the error reporting mechanism via // this macro can have a significant effect on the library. Thus be advised to use the macro // with due care! // // // \n \section error_reporting_exception_customization Customization of the Type of Exceptions // <hr> // // In addition to the customization of the entire error reporting mechanism it is also possible // to customize the type of exceptions being thrown. 
This can be achieved by customizing any // number of the following macros: \code #define BLAZE_THROW_BAD_ALLOC \ BLAZE_THROW( std::bad_alloc() ) #define BLAZE_THROW_LOGIC_ERROR( MESSAGE ) \ BLAZE_THROW( std::logic_error( MESSAGE ) ) #define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \ BLAZE_THROW( std::invalid_argument( MESSAGE ) ) #define BLAZE_THROW_LENGTH_ERROR( MESSAGE ) \ BLAZE_THROW( std::length_error( MESSAGE ) ) #define BLAZE_THROW_OUT_OF_RANGE( MESSAGE ) \ BLAZE_THROW( std::out_of_range( MESSAGE ) ) #define BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \ BLAZE_THROW( std::runtime_error( MESSAGE ) ) \endcode // In order to customize the type of exception the according macro has to be defined prior to // including any \b Blaze header file. This will override the \b Blaze default behavior. The // following example demonstrates this by replacing \c std::invalid_argument by a custom // exception type: \code class InvalidArgument { public: InvalidArgument(); explicit InvalidArgument( const std::string& message ); // ... }; #define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \ BLAZE_THROW( InvalidArgument( MESSAGE ) ) #include <blaze/Blaze.h> \endcode // By manually defining the macro, an \c InvalidArgument exception is thrown instead of a // \c std::invalid_argument exception. Note that it is recommended to define the macro such // that a subsequent semicolon is required! // // \warning These macros are provided with the intention to assist in adapting \b Blaze to // special conditions and environments. However, the customization of the type of an exception // via this macro may have an effect on the library. Thus be advised to use the macro with due // care! // // // \n \section error_reporting_special_errors Customization of Special Errors // <hr> // // Last but not least it is possible to customize the error reporting for special kinds of errors. 
// This can be achieved by customizing any number of the following macros: \code #define BLAZE_THROW_DIVISION_BY_ZERO( MESSAGE ) \ BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) #define BLAZE_THROW_LAPACK_ERROR( MESSAGE ) \ BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \endcode // As explained in the previous sections, in order to customize the handling of special errors // the according macro has to be defined prior to including any \b Blaze header file. This will // override the \b Blaze default behavior. // // // \n Previous: \ref grouping_tagging &nbsp; &nbsp; Next: \ref blas_functions \n */ //************************************************************************************************* //**BLAS Functions********************************************************************************* /*!\page blas_functions BLAS Functions // // \tableofcontents // // // For vector/vector, matrix/vector and matrix/matrix multiplications with large dense matrices // \b Blaze relies on the efficiency of BLAS libraries. For this purpose, \b Blaze implements // several convenient C++ wrapper functions for several BLAS functions. The following sections // give a complete overview of all available BLAS level 1, 2 and 3 functions. 
// // // \n \section blas_level_1 BLAS Level 1 // <hr> // // \subsection blas_level_1_dotu Dot Product (dotu) // // The following wrapper functions provide a generic interface for the BLAS functions for the // dot product of two dense vectors (\c cblas_sdot(), \c cblas_ddot(), \c cblas_cdotu_sub(), and // \c cblas_zdotu_sub()): \code namespace blaze { float dotu( blas_int_t n, const float* x, blas_int_t incX, const float* y, blas_int_t incY ); double dotu( blas_int_t n, const double* x, blas_int_t incX, const double* y, blas_int_t incY ); complex<float> dotu( blas_int_t n, const complex<float>* x, blas_int_t incX, const complex<float>* y, blas_int_t incY ); complex<double> dotu( blas_int_t n, const complex<double>* x, blas_int_t incX, const complex<double>* y, blas_int_t incY ); template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> dotu( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); } // namespace blaze \endcode // \subsection blas_level_1_dotc Complex Conjugate Dot Product (dotc) // // The following wrapper functions provide a generic interface for the BLAS functions for the // complex conjugate dot product of two dense vectors (\c cblas_sdot(), \c cblas_ddot(), // \c cblas_cdotc_sub(), and \c cblas_zdotc_sub()): \code namespace blaze { float dotc( blas_int_t n, const float* x, blas_int_t incX, const float* y, blas_int_t incY ); double dotc( blas_int_t n, const double* x, blas_int_t incX, const double* y, blas_int_t incY ); complex<float> dotc( blas_int_t n, const complex<float>* x, blas_int_t incX, const complex<float>* y, blas_int_t incY ); complex<double> dotc( blas_int_t n, const complex<double>* x, blas_int_t incX, const complex<double>* y, blas_int_t incY ); template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> dotc( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); } // namespace blaze \endcode // \subsection blas_level_1_axpy Axpy Product (axpy) // // The following wrapper 
functions provide a generic interface for the BLAS functions for the // axpy product of two dense vectors (\c cblas_saxpy(), \c cblas_daxpy(), \c cblas_caxpy(), and // \c cblas_zaxpy()): \code namespace blaze { void axpy( blas_int_t n, float alpha, const float* x, blas_int_t incX, float* y, blas_int_t incY ); void axpy( blas_int_t n, double alpha, const double* x, blas_int_t incX, double* y, blas_int_t incY ); void axpy( blas_int_t n, complex<float> alpha, const complex<float>* x, blas_int_t incX, complex<float>* y, blas_int_t incY ); void axpy( blas_int_t n, complex<double> alpha, const complex<double>* x, blas_int_t incX, complex<double>* y, blas_int_t incY ); template< typename VT1, bool TF1, typename VT2, bool TF2, typename ST > void axpy( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y, ST alpha ); } // namespace blaze \endcode // \n \section blas_level_2 BLAS Level 2 // <hr> // // \subsection blas_level_2_gemv General Matrix/Vector Multiplication (gemv) // // The following wrapper functions provide a generic interface for the BLAS functions for the // general matrix/vector multiplication (\c cblas_sgemv(), \c cblas_dgemv(), \c cblas_cgemv(), // and \c cblas_zgemv()): \code namespace blaze { void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, blas_int_t m, blas_int_t n, float alpha, const float* A, blas_int_t lda, const float* x, blas_int_t incX, float beta, float* y, blas_int_t incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, blas_int_t m, blas_int_t n, double alpha, const double* A, blas_int_t lda, const double* x, blas_int_t incX, double beta, double* y, blas_int_t incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, blas_int_t m, blas_int_t n, complex<float> alpha, const complex<float>* A, blas_int_t lda, const complex<float>* x, blas_int_t incX, complex<float> beta, complex<float>* y, blas_int_t incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, blas_int_t m, blas_int_t n, complex<double> alpha, const 
complex<double>* A, blas_int_t lda, const complex<double>* x, blas_int_t incX, complex<double> beta, complex<double>* y, blas_int_t incY ); } // namespace blaze \endcode // \n \subsection blas_level_2_trmv Triangular Matrix/Vector Multiplication (trmv) // // The following wrapper functions provide a generic interface for the BLAS functions for the // matrix/vector multiplication with a triangular matrix (\c cblas_strmv(), \c cblas_dtrmv(), // \c cblas_ctrmv(), and \c cblas_ztrmv()): \code namespace blaze { void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t n, const float* A, blas_int_t lda, float* x, blas_int_t incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t n, const double* A, blas_int_t lda, double* x, blas_int_t incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t n, const complex<float>* A, blas_int_t lda, complex<float>* x, blas_int_t incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t n, const complex<double>* A, blas_int_t lda, complex<double>* x, blas_int_t incX ); template< typename VT, typename MT, bool SO > void trmv( DenseVector<VT,false>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo ); template< typename VT, typename MT, bool SO > void trmv( DenseVector<VT,true>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo ); } // namespace blaze \endcode // \n \section blas_level_3 BLAS Level 3 // <hr> // // \subsection blas_level_3_gemm General Matrix/Matrix Multiplication (gemm) // // The following wrapper functions provide a generic interface for the BLAS functions for the // general matrix/matrix multiplication (\c cblas_sgemm(), \c cblas_dgemm(), \c cblas_cgemm(), // and \c cblas_zgemm()): \code namespace blaze { void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, blas_int_t m, blas_int_t n, blas_int_t k, float alpha, 
const float* A, blas_int_t lda, const float* B, blas_int_t ldb, float beta, float* C, blas_int_t ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, blas_int_t m, blas_int_t n, blas_int_t k, double alpha, const double* A, blas_int_t lda, const double* B, blas_int_t ldb, double beta, double* C, blas_int_t ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, blas_int_t m, blas_int_t n, blas_int_t k, complex<float> alpha, const complex<float>* A, blas_int_t lda, const complex<float>* B, blas_int_t ldb, complex<float> beta, complex<float>* C, blas_int_t ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, blas_int_t m, blas_int_t n, blas_int_t k, complex<double> alpha, const complex<double>* A, blas_int_t lda, const complex<double>* B, blas_int_t ldb, complex<double> beta, complex<double>* C, blas_int_t ldc ); } // namespace blaze \endcode // \n \subsection blas_level_3_trmm Triangular Matrix/Matrix Multiplication (trmm) // // The following wrapper functions provide a generic interface for the BLAS functions for the // matrix/matrix multiplication with a triangular matrix (\c cblas_strmm(), \c cblas_dtrmm(), // \c cblas_ctrmm(), and \c cblas_ztrmm()): \code namespace blaze { void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t m, blas_int_t n, float alpha, const float* A, blas_int_t lda, float* B, blas_int_t ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t m, blas_int_t n, double alpha, const double* A, blas_int_t lda, double* B, blas_int_t ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t m, blas_int_t n, complex<float> alpha, const complex<float>* A, blas_int_t lda, complex<float>* B, blas_int_t ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE
transA, CBLAS_DIAG diag, blas_int_t m, blas_int_t n, complex<double> alpha, const complex<double>* A, blas_int_t lda, complex<double>* B, blas_int_t ldb ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST > void trmm( DenseMatrix<MT1,SO1>& B, const DenseMatrix<MT2,SO2>& A, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); } // namespace blaze \endcode // \n \subsection blas_level_3_trsm Triangular System Solver (trsm) // // The following wrapper functions provide a generic interface for the BLAS functions for solving // a triangular system of equations (\c cblas_strsm(), \c cblas_dtrsm(), \c cblas_ctrsm(), and // \c cblas_ztrsm()): \code namespace blaze { void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t m, blas_int_t n, float alpha, const float* A, blas_int_t lda, float* B, blas_int_t ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t m, blas_int_t n, double alpha, const double* A, blas_int_t lda, double* B, blas_int_t ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t m, blas_int_t n, complex<float> alpha, const complex<float>* A, blas_int_t lda, complex<float>* B, blas_int_t ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, blas_int_t m, blas_int_t n, complex<double> alpha, const complex<double>* A, blas_int_t lda, complex<double>* B, blas_int_t ldb ); template< typename MT, bool SO, typename VT, bool TF, typename ST > void trsm( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST > void trsm( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); } // namespace blaze \endcode // \n Previous: \ref 
error_reporting_customization &nbsp; &nbsp; Next: \ref lapack_functions \n */ //************************************************************************************************* //**LAPACK Functions******************************************************************************* /*!\page lapack_functions LAPACK Functions // // \tableofcontents // // // \n \section lapack_introction Introduction // <hr> // // The \b Blaze library makes extensive use of the LAPACK functionality for various compute tasks // (including the decomposition, inversion and the computation of the determinant of dense matrices). // For this purpose, \b Blaze implements several convenient C++ wrapper functions for all required // LAPACK functions. The following sections give a complete overview of all available LAPACK wrapper // functions. For more details on the individual LAPACK functions see the \b Blaze function // documentation or the LAPACK online documentation browser: // // http://www.netlib.org/lapack/explore-html/ // // Most of the wrapper functions are implemented as thin wrappers around LAPACK functions. They // provide the parameters of the original LAPACK functions and thus provide maximum flexibility: \code using blaze::blas_int_t; constexpr size_t N( 100UL ); blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N ); // ... 
Initializing the matrix const blas_int_t m ( numeric_cast<blas_int_t>( A.rows() ) ); // == N const blas_int_t n ( numeric_cast<blas_int_t>( A.columns() ) ); // == N const blas_int_t lda ( numeric_cast<blas_int_t>( A.spacing() ) ); // >= N const blas_int_t lwork( n*lda ); const std::unique_ptr<blas_int_t[]> ipiv( new blas_int_t[N] ); // No initialization required const std::unique_ptr<double[]> work( new double[N] ); // No initialization required blas_int_t info( 0 ); getrf( m, n, A.data(), lda, ipiv.get(), &info ); // Reports failure via 'info' getri( n, A.data(), lda, ipiv.get(), work.get(), lwork, &info ); // Reports failure via 'info' \endcode // In this context, \c blas_int_t is either a 32-bit or 64-bit signed integral type, depending // on the setting of the \c BLAZE_BLAS_IS_64BIT compilation switch (see \ref blas_mode). // // Additionally, \b Blaze provides wrappers that provide a higher level of abstraction. These // wrappers provide a maximum of convenience: \code using blaze::blas_int_t; constexpr size_t N( 100UL ); blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N ); // ... Initializing the matrix const std::unique_ptr<blas_int_t[]> ipiv( new blas_int_t[N] ); // No initialization required getrf( A, ipiv.get() ); // Cannot fail getri( A, ipiv.get() ); // Reports failure via exception \endcode // \note All functions only work for general, non-adapted matrices with \c float, \c double, // \c complex<float>, or \c complex<double> element type. The attempt to call the function with // adaptors or matrices of any other element type results in a compile time error! // // \note All functions can only be used if a fitting LAPACK library is available and linked to // the final executable. Otherwise a call to this function will result in a linker error. // // \note For performance reasons all functions do only provide the basic exception safety guarantee, // i.e. in case an exception is thrown the given matrix may already have been modified. 
// // // \n \section lapack_decomposition Matrix Decomposition // <hr> // // The following functions decompose/factorize the given dense matrix. Based on this decomposition // the matrix can be inverted or used to solve a linear system of equations. // // // \n \subsection lapack_lu_decomposition LU Decomposition // // The following functions provide an interface for the LAPACK functions \c sgetrf(), \c dgetrf(), // \c cgetrf(), and \c zgetrf(), which compute the LU decomposition for the given general matrix: \code namespace blaze { void getrf( blas_int_t m, blas_int_t n, float* A, blas_int_t lda, blas_int_t* ipiv, blas_int_t* info ); void getrf( blas_int_t m, blas_int_t n, double* A, blas_int_t lda, blas_int_t* ipiv, blas_int_t* info ); void getrf( blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, blas_int_t* ipiv, blas_int_t* info ); void getrf( blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, blas_int_t* ipiv, blas_int_t* info ); template< typename MT, bool SO > void getrf( DenseMatrix<MT,SO>& A, blas_int_t* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = P \cdot L \cdot U, \f]\n // where \c P is a permutation matrix, \c L is a lower unitriangular matrix, and \c U is an upper // triangular matrix. The resulting decomposition is stored within \a A: In case of a column-major // matrix, \c L is stored in the lower part of \a A and \c U is stored in the upper part. The unit // diagonal elements of \c L are not stored. In case \a A is a row-major matrix the result is // transposed. // // \note The LU decomposition will never fail, even for singular matrices. However, in case of a // singular matrix the resulting decomposition cannot be used for a matrix inversion or solving // a linear system of equations. 
// // // \n \subsection lapack_ldlt_decomposition LDLT Decomposition // // The following functions provide an interface for the LAPACK functions \c ssytrf(), \c dsytrf(), // \c csytrf(), and \c zsytrf(), which compute the LDLT (Bunch-Kaufman) decomposition for the given // symmetric indefinite matrix: \code namespace blaze { void sytrf( char uplo, blas_int_t n, float* A, blas_int_t lda, blas_int_t* ipiv, float* work, blas_int_t lwork, blas_int_t* info ); void sytrf( char uplo, blas_int_t n, double* A, blas_int_t lda, blas_int_t* ipiv, double* work, blas_int_t lwork, blas_int_t* info ); void sytrf( char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, blas_int_t* ipiv, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void sytrf( char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, blas_int_t* ipiv, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void sytrf( DenseMatrix<MT,SO>& A, char uplo, blas_int_t* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U D U^{T} \texttt{ (if uplo = 'U'), or } A = L D L^{T} \texttt{ (if uplo = 'L'), } \f] // where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices, // and \c D is symmetric and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting // decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in // the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to // \c 'U' the result is stored in the upper part and the lower part remains untouched. // // \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in // case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or // solving a linear system of equations. 
// // // \n \subsection lapack_ldlh_decomposition LDLH Decomposition // // The following functions provide an interface for the LAPACK functions \c chetrf() and \c zhetrf(), // which compute the LDLH (Bunch-Kaufman) decomposition for the given Hermitian indefinite matrix: \code namespace blaze { void hetrf( char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, blas_int_t* ipiv, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void hetrf( char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, blas_int_t* ipiv, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void hetrf( DenseMatrix<MT,SO>& A, char uplo, blas_int_t* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U D U^{H} \texttt{ (if uplo = 'U'), or } A = L D L^{H} \texttt{ (if uplo = 'L'), } \f] // where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices, // and \c D is Hermitian and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting // decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in // the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to // \c 'U' the result is stored in the upper part and the lower part remains untouched. // // \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in // case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or // solving a linear system of equations. 
// // // \n \subsection lapack_llh_decomposition Cholesky Decomposition // // The following functions provide an interface for the LAPACK functions \c spotrf(), \c dpotrf(), // \c cpotrf(), and \c zpotrf(), which compute the Cholesky (LLH) decomposition for the given // positive definite matrix: \code namespace blaze { void potrf( char uplo, blas_int_t n, float* A, blas_int_t lda, blas_int_t* info ); void potrf( char uplo, blas_int_t n, double* A, blas_int_t lda, blas_int_t* info ); void potrf( char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, blas_int_t* info ); void potrf( char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, blas_int_t* info ); template< typename MT, bool SO > void potrf( DenseMatrix<MT,SO>& A, char uplo ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U^{T} U \texttt{ (if uplo = 'U'), or } A = L L^{T} \texttt{ (if uplo = 'L'), } \f] // where \c U is an upper triangular matrix and \c L is a lower triangular matrix. The Cholesky // decomposition fails if the given matrix \a A is not a positive definite matrix. In this case // a \c std::invalid_argument exception is thrown. 
// // // \n \subsection lapack_qr_decomposition QR Decomposition // // The following functions provide an interface for the LAPACK functions \c sgeqrf(), \c dgeqrf(), // \c cgeqrf(), and \c zgeqrf(), which compute the QR decomposition of the given general matrix: \code namespace blaze { void geqrf( blas_int_t m, blas_int_t n, float* A, blas_int_t lda, float* tau, float* work, blas_int_t lwork, blas_int_t* info ); void geqrf( blas_int_t m, blas_int_t n, double* A, blas_int_t lda, double* tau, double* work, blas_int_t lwork, blas_int_t* info ); void geqrf( blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void geqrf( blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void geqrf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = Q \cdot R, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:m)</tt> is stored on exit in <tt>A(i+1:m,i)</tt>, and \c tau // in \c tau(i). Thus on exit the elements on and above the diagonal of the matrix contain the // min(\a m,\a n)-by-\a n upper trapezoidal matrix \c R (\c R is upper triangular if \a m >= \a n); // the elements below the diagonal, with the array \c tau, represent the orthogonal matrix \c Q as // a product of min(\a m,\a n) elementary reflectors. 
// // The following functions provide an interface for the LAPACK functions \c sorgqr(), \c dorgqr(), // \c sorg2r(), \c dorg2r(), \c cungqr(), \c zungqr(), \c cung2r(), and \c zung2r(), which // reconstruct the \c Q matrix from a QR decomposition: \code namespace blaze { void orgqr( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t lwork, blas_int_t* info ); void orgqr( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void orgqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void org2r( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t* info ); void org2r( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t* info ); template< typename MT, bool SO > void org2r( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void ungqr( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void ungqr( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void ungqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void ung2r( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t* info ); void ung2r( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t* info ); template< typename MT, bool SO > void ung2r( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions 
provide an interface for the LAPACK functions \c sormqr(), \c dormqr(), // \c cunmqr(), and \c zunmqr(), which can be used to multiply a matrix with the \c Q matrix from // a QR decomposition: \code namespace blaze { void ormqr( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const float* A, blas_int_t lda, const float* tau, float* C, blas_int_t ldc, float* work, blas_int_t lwork, blas_int_t* info ); void ormqr( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const double* A, blas_int_t lda, const double* tau, double* C, blas_int_t ldc, double* work, blas_int_t lwork, blas_int_t* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormqr( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); void unmqr( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* C, blas_int_t ldc, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void unmqr( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* C, blas_int_t ldc, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT1, bool SO, typename MT2 > void unmqr( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_rq_decomposition RQ Decomposition // // The following functions provide an interface for the LAPACK functions \c sgerqf(), \c dgerqf(), // \c cgerqf(), and \c zgerqf(), which compute the RQ decomposition of the given general matrix: \code namespace blaze { void gerqf( blas_int_t m, blas_int_t n, float* A, blas_int_t lda, float* tau, float* work, blas_int_t lwork, blas_int_t* info ); void gerqf( blas_int_t m, blas_int_t n, double* A, blas_int_t lda, double* tau, double* work, blas_int_t 
lwork, blas_int_t* info ); void gerqf( blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void gerqf( blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void gerqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = R \cdot Q, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(n-k+i+1:n) = 0</tt> and // <tt>v(n-k+i) = 1</tt>. <tt>v(1:n-k+i-1)</tt> is stored on exit in <tt>A(m-k+i,1:n-k+i-1)</tt>, // and \c tau in \c tau(i). Thus in case \a m <= \a n, the upper triangle of the subarray // <tt>A(1:m,n-m+1:n)</tt> contains the \a m-by-\a m upper triangular matrix \c R and in case // \a m >= \a n, the elements on and above the (\a m-\a n)-th subdiagonal contain the \a m-by-\a n // upper trapezoidal matrix \c R; the remaining elements in combination with the array \c tau // represent the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors. 
// // The following functions provide an interface for the LAPACK functions \c sorgrq(), \c dorgrq(), // \c sorgr2(), \c dorgr2(), \c cungrq(), \c zungrq(), \c cungr2(), and \c zungr2(), which // reconstruct the \c Q matrix from a RQ decomposition: \code namespace blaze { void orgrq( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t lwork, blas_int_t* info ); void orgrq( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void orgrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void orgr2( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t* info ); void orgr2( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t* info ); template< typename MT, bool SO > void orgr2( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void ungrq( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void ungrq( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void ungrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void ungr2( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t* info ); void ungr2( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t* info ); template< typename MT, bool SO > void ungr2( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions 
provide an interface for the LAPACK functions \c sormrq(), \c dormrq(), // \c cunmrq(), and \c zunmrq(), which can be used to multiply a matrix with the \c Q matrix from // a RQ decomposition: \code namespace blaze { void ormrq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const float* A, blas_int_t lda, const float* tau, float* C, blas_int_t ldc, float* work, blas_int_t lwork, blas_int_t* info ); void ormrq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const double* A, blas_int_t lda, const double* tau, double* C, blas_int_t ldc, double* work, blas_int_t lwork, blas_int_t* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormrq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); void unmrq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* C, blas_int_t ldc, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void unmrq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* C, blas_int_t ldc, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT1, bool SO, typename MT2 > void unmrq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_ql_decomposition QL Decomposition // // The following functions provide an interface for the LAPACK functions \c sgeqlf(), \c dgeqlf(), // \c cgeqlf(), and \c zgeqlf(), which compute the QL decomposition of the given general matrix: \code namespace blaze { void geqlf( blas_int_t m, blas_int_t n, float* A, blas_int_t lda, float* tau, float* work, blas_int_t lwork, blas_int_t* info ); void geqlf( blas_int_t m, blas_int_t n, double* A, blas_int_t lda, double* tau, double* work, blas_int_t 
lwork, blas_int_t* info ); void geqlf( blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void geqlf( blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void geqlf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = Q \cdot L, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(m-k+i+1:m) = 0</tt> and // <tt>v(m-k+i) = 1</tt>. <tt>v(1:m-k+i-1)</tt> is stored on exit in <tt>A(1:m-k+i-1,n-k+i)</tt>, // and \c tau in \c tau(i). Thus in case \a m >= \a n, the lower triangle of the subarray // A(m-n+1:m,1:n) contains the \a n-by-\a n lower triangular matrix \c L and in case \a m <= \a n, // the elements on and below the (\a n-\a m)-th subdiagonal contain the \a m-by-\a n lower // trapezoidal matrix \c L; the remaining elements in combination with the array \c tau represent // the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors. 
// // The following functions provide an interface for the LAPACK functions \c sorgql(), \c dorgql(), // \c sorg2l(), \c dorg2l(), \c cungql(), \c zungql(), \c cung2l(), and \c zung2l(), which // reconstruct the \c Q matrix from an QL decomposition: \code namespace blaze { void orgql( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t lwork, blas_int_t* info ); void orgql( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void orgql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void org2l( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t* info ); void org2l( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t* info ); template< typename MT, bool SO > void org2l( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void ungql( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void ungql( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void ungql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void ung2l( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t* info ); void ung2l( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t* info ); template< typename MT, bool SO > void ung2l( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions 
provide an interface for the LAPACK functions \c sormql(), \c dormql(), // \c cunmql(), and \c zunmql(), which can be used to multiply a matrix with the \c Q matrix from // a QL decomposition: \code namespace blaze { void ormql( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const float* A, blas_int_t lda, const float* tau, float* C, blas_int_t ldc, float* work, blas_int_t lwork, blas_int_t* info ); void ormql( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const double* A, blas_int_t lda, const double* tau, double* C, blas_int_t ldc, double* work, blas_int_t lwork, blas_int_t* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormql( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); void unmql( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* C, blas_int_t ldc, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void unmql( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* C, blas_int_t ldc, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT1, bool SO, typename MT2 > void unmql( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_lq_decomposition LQ Decomposition // // The following functions provide an interface for the LAPACK functions \c sgelqf(), \c dgelqf(), // \c cgelqf(), and \c zgelqf(), which compute the LQ decomposition of the given general matrix: \code namespace blaze { void gelqf( blas_int_t m, blas_int_t n, float* A, blas_int_t lda, float* tau, float* work, blas_int_t lwork, blas_int_t* info ); void gelqf( blas_int_t m, blas_int_t n, double* A, blas_int_t lda, double* tau, double* work, blas_int_t 
lwork, blas_int_t* info ); void gelqf( blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void gelqf( blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void gelqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = L \cdot Q, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:n)</tt> is stored on exit in <tt>A(i,i+1:n)</tt>, and \c tau // in \c tau(i). Thus on exit the elements on and below the diagonal of the matrix contain the // \a m-by-min(\a m,\a n) lower trapezoidal matrix \c L (\c L is lower triangular if \a m <= \a n); // the elements above the diagonal, with the array \c tau, represent the orthogonal matrix \c Q // as a product of min(\a m,\a n) elementary reflectors. 
// // The following functions provide an interface for the LAPACK functions \c sorglq(), \c dorglq(), // \c sorgl2(), \c dorgl2(), \c cunglq(), \c zunglq(), \c cungl2(), and \c zungl2(), which // reconstruct the \c Q matrix from an LQ decomposition: \code namespace blaze { void orglq( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t lwork, blas_int_t* info ); void orglq( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void orglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void orgl2( blas_int_t m, blas_int_t n, blas_int_t k, float* A, blas_int_t lda, const float* tau, float* work, blas_int_t* info ); void orgl2( blas_int_t m, blas_int_t n, blas_int_t k, double* A, blas_int_t lda, const double* tau, double* work, blas_int_t* info ); template< typename MT, bool SO > void orgl2( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void unglq( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void unglq( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void unglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); void ungl2( blas_int_t m, blas_int_t n, blas_int_t k, complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* work, blas_int_t* info ); void ungl2( blas_int_t m, blas_int_t n, blas_int_t k, complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* work, blas_int_t* info ); template< typename MT, bool SO > void ungl2( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions 
provide an interface for the LAPACK functions \c sormlq(), \c dormlq(), // \c cunmlq(), and \c zunmlq(), which can be used to multiply a matrix with the \c Q matrix from // a LQ decomposition: \code namespace blaze { void ormlq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const float* A, blas_int_t lda, const float* tau, float* C, blas_int_t ldc, float* work, blas_int_t lwork, blas_int_t* info ); void ormlq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const double* A, blas_int_t lda, const double* tau, double* C, blas_int_t ldc, double* work, blas_int_t lwork, blas_int_t* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormlq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); void unmlq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<float>* A, blas_int_t lda, const complex<float>* tau, complex<float>* C, blas_int_t ldc, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void unmlq( char side, char trans, blas_int_t m, blas_int_t n, blas_int_t k, const complex<double>* A, blas_int_t lda, const complex<double>* tau, complex<double>* C, blas_int_t ldc, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT1, bool SO, typename MT2 > void unmlq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \section lapack_inversion Matrix Inversion // <hr> // // Given a matrix that has already been decomposed, the following functions can be used to invert // the matrix in-place. 
// // // \n \subsection lapack_lu_inversion LU-based Inversion // // The following functions provide an interface for the LAPACK functions \c sgetri(), \c dgetri(), // \c cgetri(), and \c zgetri(), which invert a general matrix that has already been decomposed by // an \ref lapack_lu_decomposition : \code namespace blaze { void getri( blas_int_t n, float* A, blas_int_t lda, const blas_int_t* ipiv, float* work, blas_int_t lwork, blas_int_t* info ); void getri( blas_int_t n, double* A, blas_int_t lda, const blas_int_t* ipiv, double* work, blas_int_t lwork, blas_int_t* info ); void getri( blas_int_t n, complex<float>* A, blas_int_t lda, const blas_int_t* ipiv, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void getri( blas_int_t n, complex<double>* A, blas_int_t lda, const blas_int_t* ipiv, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO > void getri( DenseMatrix<MT,SO>& A, const blas_int_t* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \c std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_ldlt_inversion LDLT-based Inversion // // The following functions provide an interface for the LAPACK functions \c ssytri(), \c dsytri(), // \c csytri(), and \c zsytri(), which invert a symmetric indefinite matrix that has already been // decomposed by an \ref lapack_ldlt_decomposition : \code namespace blaze { void sytri( char uplo, blas_int_t n, float* A, blas_int_t lda, const blas_int_t* ipiv, float* work, blas_int_t* info ); void sytri( char uplo, blas_int_t n, double* A, blas_int_t lda, const blas_int_t* ipiv, double* work, blas_int_t* info ); void sytri( char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, const blas_int_t* ipiv, complex<float>* work, blas_int_t* info ); void sytri( char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, const blas_int_t* ipiv, complex<double>* work, blas_int_t* info ); template< typename MT, bool SO > void sytri( DenseMatrix<MT,SO>& A, char uplo, const blas_int_t* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \c std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_ldlh_inversion LDLH-based Inversion // // The following functions provide an interface for the LAPACK functions \c chetri() and // \c zhetri(), which invert an Hermitian indefinite matrix that has already been decomposed by // an \ref lapack_ldlh_decomposition : \code namespace blaze { void hetri( char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, const blas_int_t* ipiv, complex<float>* work, blas_int_t* info ); void hetri( char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, const blas_int_t* ipiv, complex<double>* work, blas_int_t* info ); template< typename MT, bool SO > void hetri( DenseMatrix<MT,SO>& A, char uplo, const blas_int_t* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first two functions report failure via the \c info argument, the third function throws a // \c std::invalid_argument exception in case of an error. // // // \n \subsection lapack_llh_inversion Cholesky-based Inversion // // The following functions provide an interface for the LAPACK functions \c spotri(), \c dpotri(), // \c cpotri(), and \c zpotri(), which invert a positive definite matrix that has already been // decomposed by an \ref lapack_llh_decomposition : \code namespace blaze { void potri( char uplo, blas_int_t n, float* A, blas_int_t lda, blas_int_t* info ); void potri( char uplo, blas_int_t n, double* A, blas_int_t lda, blas_int_t* info ); void potri( char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, blas_int_t* info ); void potri( char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, blas_int_t* info ); template< typename MT, bool SO > void potri( DenseMatrix<MT,SO>& A, char uplo ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ...
the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \c std::invalid_argument exception in case of an error. // // // \n \subsection lapack_triangular_inversion Inversion of Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strtri(), \c dtrtri(), // \c ctrtri(), and \c ztrtri(), which invert the given triangular matrix in-place: \code namespace blaze { void trtri( char uplo, char diag, blas_int_t n, float* A, blas_int_t lda, blas_int_t* info ); void trtri( char uplo, char diag, blas_int_t n, double* A, blas_int_t lda, blas_int_t* info ); void trtri( char uplo, char diag, blas_int_t n, complex<float>* A, blas_int_t lda, blas_int_t* info ); void trtri( char uplo, char diag, blas_int_t n, complex<double>* A, blas_int_t lda, blas_int_t* info ); template< typename MT, bool SO > void trtri( DenseMatrix<MT,SO>& A, char uplo, char diag ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a diag argument is neither 'U' nor 'N'; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \c std::invalid_argument exception in case of an error. // // // \n \section lapack_substitution Substitution // <hr> // // Given a matrix that has already been decomposed the following functions can be used to perform // the forward/backward substitution step to compute the solution to a system of linear equations. 
// Note that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems: // // Single right-hand side: // - \f$ A *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a A is row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ if both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major // // In this context the general system matrix \a A is a n-by-n matrix that has already been // factorized by the according decomposition function, \a x and \a b are n-dimensional vectors // and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices. // // // \n \subsection lapack_lu_substitution LU-based Substitution // // The following functions provide an interface for the LAPACK functions \c sgetrs(), \c dgetrs(), // \c cgetrs(), and \c zgetrs(), which perform the substitution step for a general matrix that has // already been decomposed by an \ref lapack_lu_decomposition : \code namespace blaze { void getrs( char trans, blas_int_t n, blas_int_t nrhs, const float* A, blas_int_t lda, const blas_int_t* ipiv, float* B, blas_int_t ldb, blas_int_t* info ); void getrs( char trans, blas_int_t n, blas_int_t nrhs, const double* A, blas_int_t lda, const blas_int_t* ipiv, double* B, blas_int_t ldb, blas_int_t* info ); void getrs( char trans, blas_int_t n, blas_int_t nrhs, const complex<float>* A, blas_int_t lda, const blas_int_t* ipiv, complex<float>* B, blas_int_t ldb, blas_int_t* info ); void getrs( char trans, blas_int_t n, blas_int_t nrhs, const complex<double>* A, blas_int_t lda, const blas_int_t* ipiv, complex<double>* B, blas_int_t ldb, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void getrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char trans, const blas_int_t* ipiv ); template<
typename MT1, bool SO1, typename MT2, bool SO2 > void getrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char trans, const blas_int_t* ipiv ); } // namespace blaze \endcode // Note that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems (see \ref lapack_substitution). If the function exits // successfully, the vector \a b or the matrix \a B contain the solution(s) of the linear system of // equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \c std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlt_substitution LDLT-based Substitution // // The following functions provide an interface for the LAPACK functions \c ssytrs(), \c dsytrs(), // \c csytrs(), and \c zsytrs(), which perform the substitution step for a symmetric indefinite // matrix that has already been decomposed by an \ref lapack_ldlt_decomposition : \code namespace blaze { void sytrs( char uplo, blas_int_t n, blas_int_t nrhs, const float* A, blas_int_t lda, const blas_int_t* ipiv, float* B, blas_int_t ldb, blas_int_t* info ); void sytrs( char uplo, blas_int_t n, blas_int_t nrhs, const double* A, blas_int_t lda, const blas_int_t* ipiv, double* B, blas_int_t ldb, blas_int_t* info ); void sytrs( char uplo, blas_int_t n, blas_int_t nrhs, const complex<float>* A, blas_int_t lda, const blas_int_t* ipiv, complex<float>* B, blas_int_t ldb, blas_int_t* info ); void sytrs( char uplo, blas_int_t n, blas_int_t nrhs, const complex<double>* A, blas_int_t lda, const blas_int_t* ipiv, complex<double>* B, blas_int_t ldb, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void sytrs( const 
DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const blas_int_t* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void sytrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const blas_int_t* ipiv ); } // namespace blaze \endcode // Note that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems (see \ref lapack_substitution). If the function exits // successfully, the vector \a b or the matrix \a B contain the solution(s) of the linear system of // equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \c std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlh_substitution LDLH-based Substitution // // The following functions provide an interface for the LAPACK functions \c chetrs(), and \c zhetrs(), // which perform the substitution step for an Hermitian indefinite matrix that has already been // decomposed by an \ref lapack_ldlh_decomposition : \code namespace blaze { void hetrs( char uplo, blas_int_t n, blas_int_t nrhs, const complex<float>* A, blas_int_t lda, const blas_int_t* ipiv, complex<float>* B, blas_int_t ldb, blas_int_t* info ); void hetrs( char uplo, blas_int_t n, blas_int_t nrhs, const complex<double>* A, blas_int_t lda, const blas_int_t* ipiv, complex<double>* B, blas_int_t ldb, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void hetrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const blas_int_t* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void hetrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const blas_int_t* ipiv ); } // namespace blaze 
\endcode // Note that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems (see \ref lapack_substitution). If the function exits // successfully, the vector \a b or the matrix \a B contain the solution(s) of the linear system of // equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first two functions report failure via the \c info argument, the last two functions throw // a \c std::invalid_argument exception in case of an error. // // // \n \subsection lapack_llh_substitution Cholesky-based Substitution // // The following functions provide an interface for the LAPACK functions \c spotrs(), \c dpotrs(), // \c cpotrs(), and \c zpotrs(), which perform the substitution step for a positive definite matrix // that has already been decomposed by an \ref lapack_llh_decomposition : \code namespace blaze { void potrs( char uplo, blas_int_t n, blas_int_t nrhs, const float* A, blas_int_t lda, float* B, blas_int_t ldb, blas_int_t* info ); void potrs( char uplo, blas_int_t n, blas_int_t nrhs, const double* A, blas_int_t lda, double* B, blas_int_t ldb, blas_int_t* info ); void potrs( char uplo, blas_int_t n, blas_int_t nrhs, const complex<float>* A, blas_int_t lda, complex<float>* B, blas_int_t ldb, blas_int_t* info ); void potrs( char uplo, blas_int_t n, blas_int_t nrhs, const complex<double>* A, blas_int_t lda, complex<double>* B, blas_int_t ldb, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void potrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void potrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo ); } // namespace blaze \endcode // Note that depending on the storage order of the system matrix 
and the given right-hand side the // functions solve different equation systems (see \ref lapack_substitution). If the function exits // successfully, the vector \a b or the matrix \a B contain the solution(s) of the linear system of // equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \c std::invalid_argument exception in case of an error. // // // \n \subsection lapack_triangular_substitution Substitution for Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strtrs(), \c dtrtrs(), // \c ctrtrs(), and \c ztrtrs(), which perform the substitution step for a triangular matrix: \code namespace blaze { void trtrs( char uplo, char trans, char diag, blas_int_t n, blas_int_t nrhs, const float* A, blas_int_t lda, float* B, blas_int_t ldb, blas_int_t* info ); void trtrs( char uplo, char trans, char diag, blas_int_t n, blas_int_t nrhs, const double* A, blas_int_t lda, double* B, blas_int_t ldb, blas_int_t* info ); void trtrs( char uplo, char trans, char diag, blas_int_t n, blas_int_t nrhs, const complex<float>* A, blas_int_t lda, complex<float>* B, blas_int_t ldb, blas_int_t* info ); void trtrs( char uplo, char trans, char diag, blas_int_t n, blas_int_t nrhs, const complex<double>* A, blas_int_t lda, complex<double>* B, blas_int_t ldb, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void trtrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void trtrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, char trans, char diag ); } // namespace blaze \endcode // Note that depending on the storage order of the system matrix
and the given right-hand side the // functions solve different equation systems (see \ref lapack_substitution). If the function exits // successfully, the vector \a b or the matrix \a B contain the solution(s) of the linear system of // equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the given \a diag argument is neither 'U' nor 'N'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \c std::invalid_argument exception in case of an error. // // // \n \section lapack_linear_system_solver Linear System Solver // <hr> // // The following functions represent compound functions that perform both the decomposition step // as well as the substitution step to compute the solution to a system of linear equations. Note // that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems: // // Single right-hand side: // - \f$ A *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a A is row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ if both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major // // In this context the general system matrix \a A is a n-by-n matrix that has already been // factorized by the according decomposition function, \a x and \a b are n-dimensional vectors // and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices. 
// // // \subsection lapack_lu_linear_system_solver LU-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c sgesv(), \c dgesv(), // \c cgesv(), and \c zgesv(), which combine an \ref lapack_lu_decomposition and the according // \ref lapack_lu_substitution : \code namespace blaze { void gesv( blas_int_t n, blas_int_t nrhs, float* A, blas_int_t lda, blas_int_t* ipiv, float* B, blas_int_t ldb, blas_int_t* info ); void gesv( blas_int_t n, blas_int_t nrhs, double* A, blas_int_t lda, blas_int_t* ipiv, double* B, blas_int_t ldb, blas_int_t* info ); void gesv( blas_int_t n, blas_int_t nrhs, complex<float>* A, blas_int_t lda, blas_int_t* ipiv, complex<float>* B, blas_int_t ldb, blas_int_t* info ); void gesv( blas_int_t n, blas_int_t nrhs, complex<double>* A, blas_int_t lda, blas_int_t* ipiv, complex<double>* B, blas_int_t ldb, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void gesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, blas_int_t* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void gesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, blas_int_t* ipiv ); } // namespace blaze \endcode // Note that depending on the storage order of the system matrix and the given right-hand side // the functions solve different equation systems (see \ref lapack_linear_system_solver). If // the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_lu_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw a // \c std::invalid_argument exception in case of an error.
// // // \n \subsection lapack_ldlt_linear_system_solver LDLT-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c ssysv(), \c dsysv(), // \c csysv(), and \c zsysv(), which combine an \ref lapack_ldlt_decomposition and the according // \ref lapack_ldlt_substitution : \code namespace blaze { void sysv( char uplo, blas_int_t n, blas_int_t nrhs, float* A, blas_int_t lda, blas_int_t* ipiv, float* B, blas_int_t ldb, float* work, blas_int_t lwork, blas_int_t* info ); void sysv( char uplo, blas_int_t n, blas_int_t nrhs, double* A, blas_int_t lda, blas_int_t* ipiv, double* B, blas_int_t ldb, double* work, blas_int_t lwork, blas_int_t* info ); void sysv( char uplo, blas_int_t n, blas_int_t nrhs, complex<float>* A, blas_int_t lda, blas_int_t* ipiv, complex<float>* B, blas_int_t ldb, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void sysv( char uplo, blas_int_t n, blas_int_t nrhs, complex<double>* A, blas_int_t lda, blas_int_t* ipiv, complex<double>* B, blas_int_t ldb, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void sysv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, blas_int_t* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void sysv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, blas_int_t* ipiv ); } // namespace blaze \endcode // Note that depending on the storage order of the system matrix and the given right-hand side // the functions solve different equation systems (see \ref lapack_linear_system_solver). If // the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_ldlt_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... 
the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw a // \c std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlh_linear_system_solver LDLH-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c chesv() and // \c zhesv(), which combine an \ref lapack_ldlh_decomposition and the according // \ref lapack_ldlh_substitution : \code namespace blaze { void hesv( char uplo, blas_int_t n, blas_int_t nrhs, complex<float>* A, blas_int_t lda, blas_int_t* ipiv, complex<float>* B, blas_int_t ldb, complex<float>* work, blas_int_t lwork, blas_int_t* info ); void hesv( char uplo, blas_int_t n, blas_int_t nrhs, complex<double>* A, blas_int_t lda, blas_int_t* ipiv, complex<double>* B, blas_int_t ldb, complex<double>* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void hesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, blas_int_t* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void hesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, blas_int_t* ipiv ); } // namespace blaze \endcode // Note that depending on the storage order of the system matrix and the given right-hand side // the functions solve different equation systems (see \ref lapack_linear_system_solver). If // the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_ldlh_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ...
the given system matrix is singular and not invertible. // // The first two functions report failure via the \c info argument, the last two functions throw a // \c std::invalid_argument exception in case of an error. // // // \n \subsection lapack_llh_linear_system_solver Cholesky-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c sposv(), \c dposv(), // \c cposv(), and \c zposv(), which combine an \ref lapack_llh_decomposition and the according // \ref lapack_llh_substitution : \code namespace blaze { void posv( char uplo, blas_int_t n, blas_int_t nrhs, float* A, blas_int_t lda, float* B, blas_int_t ldb, blas_int_t* info ); void posv( char uplo, blas_int_t n, blas_int_t nrhs, double* A, blas_int_t lda, double* B, blas_int_t ldb, blas_int_t* info ); void posv( char uplo, blas_int_t n, blas_int_t nrhs, complex<float>* A, blas_int_t lda, complex<float>* B, blas_int_t ldb, blas_int_t* info ); void posv( char uplo, blas_int_t n, blas_int_t nrhs, complex<double>* A, blas_int_t lda, complex<double>* B, blas_int_t ldb, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void posv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void posv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo ); } // namespace blaze \endcode // Note that depending on the storage order of the system matrix and the given right-hand side // the functions solve different equation systems (see \ref lapack_linear_system_solver). If // the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_llh_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ...
the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw a // \c std::invalid_argument exception in case of an error. // // // \n \subsection lapack_triangular_linear_system_solver Linear System Solver for Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strsv(), \c dtrsv(), // \c ctrsv(), and \c ztrsv(): \code namespace blaze { void trsv( char uplo, char trans, char diag, blas_int_t n, const float* A, blas_int_t lda, float* x, blas_int_t incX ); void trsv( char uplo, char trans, char diag, blas_int_t n, const double* A, blas_int_t lda, double* x, blas_int_t incX ); void trsv( char uplo, char trans, char diag, blas_int_t n, const complex<float>* A, blas_int_t lda, complex<float>* x, blas_int_t incX ); void trsv( char uplo, char trans, char diag, blas_int_t n, const complex<double>* A, blas_int_t lda, complex<double>* x, blas_int_t incX ); template< typename MT, bool SO, typename VT, bool TF > void trsv( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag ); } // namespace blaze \endcode // Note that depending on the storage order of the system matrix and the given right-hand side // the functions solve different equation systems (see \ref lapack_linear_system_solver). If the // function exits successfully, the vector \a b contains the solution of the // linear system of equations. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the given \a diag argument is neither 'U' nor 'N'. // // The last function throws a \c std::invalid_argument exception in case of an error.
Note that // none of the functions does perform any test for singularity or near-singularity. Such tests // must be performed prior to calling this function! // // // \n \section lapack_eigenvalues Eigenvalues/Eigenvectors // // \subsection lapack_eigenvalues_general General Matrices // // The following functions provide an interface for the LAPACK functions \c sgeev(), \c dgeev(), // \c cgeev(), and \c zgeev(), which compute the eigenvalues and optionally the eigenvectors of // the given general matrix: \code namespace blaze { void geev( char jobvl, char jobvr, blas_int_t n, float* A, blas_int_t lda, float* wr, float* wi, float* VL, blas_int_t ldvl, float* VR, blas_int_t ldvr, float* work, blas_int_t lwork, blas_int_t* info ); void geev( char jobvl, char jobvr, blas_int_t n, double* A, blas_int_t lda, double* wr, double* wi, double* VL, blas_int_t ldvl, double* VR, blas_int_t ldvr, double* work, blas_int_t lwork, blas_int_t* info ); void geev( char jobvl, char jobvr, blas_int_t n, complex<float>* A, blas_int_t lda, complex<float>* w, complex<float>* VL, blas_int_t ldvl, complex<float>* VR, blas_int_t ldvr, complex<float>* work, blas_int_t lwork, float* rwork, blas_int_t* info ); void geev( char jobvl, char jobvr, blas_int_t n, complex<double>* A, blas_int_t lda, complex<double>* w, complex<double>* VL, blas_int_t ldvl, complex<double>* VR, blas_int_t ldvr, complex<double>* work, blas_int_t lwork, double* rwork, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void geev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool TF > void geev( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& VL, DenseVector<VT,TF>& w ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 > void geev( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& VR ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool TF, typename MT3, 
bool SO3 > void geev( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& VL, DenseVector<VT,TF>& w, DenseMatrix<MT3,SO3>& VR ); } // namespace blaze \endcode // The complex eigenvalues of the given matrix \a A are returned in the given vector \a w. // Please note that no order of eigenvalues can be assumed, except that complex conjugate pairs // of eigenvalues appear consecutively with the eigenvalue having the positive imaginary part // first. // // If \a VR is provided as an argument, the right eigenvectors are returned in the rows of \a VR // in case \a VR is a row-major matrix and in the columns of \a VR in case \a VR is a column-major // matrix. The right eigenvector \f$v[j]\f$ of \a A satisfies \f[ A * v[j] = lambda[j] * v[j], \f] // where \f$lambda[j]\f$ is its eigenvalue. // // If \a VL is provided as an argument, the left eigenvectors are returned in the rows of \a VL // in case \a VL is a row-major matrix and in the columns of \a VL in case \a VL is a column-major // matrix. The left eigenvector \f$u[j]\f$ of \a A satisfies \f[ u[j]^{H} * A = lambda[j] * u[j]^{H}, \f] // where \f$u[j]^{H}\f$ denotes the conjugate transpose of \f$u[j]\f$. // // \a w, \a VL, and \a VR are resized to the correct dimensions (if possible and necessary). The // functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given matrix \a VL is a fixed size matrix and the dimensions don't match; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a VR is a fixed size matrix and the dimensions don't match; // - ... the eigenvalue computation fails. // // The first four functions report failure via the \c info argument, the last four functions throw // an exception in case of an error. 
// // // \n \subsection lapack_eigenvalues_symmetric Symmetric Matrices // // The following functions provide an interface for the LAPACK functions \c ssyev() and \c dsyev(), // which compute the eigenvalues and eigenvectors of the given symmetric matrix: \code namespace blaze { void syev( char jobz, char uplo, blas_int_t n, float* A, blas_int_t lda, float* w, float* work, blas_int_t lwork, blas_int_t* info ); void syev( char jobz, char uplo, blas_int_t n, double* A, blas_int_t lda, double* w, double* work, blas_int_t lwork, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void syev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // Alternatively, the following functions can be used, which provide an interface to the LAPACK // functions \c ssyevd() and \c dsyevd(). In contrast to the \c syev() functions they use a // divide-and-conquer strategy for the computation of the left and right eigenvectors: \code namespace blaze { void syevd( char jobz, char uplo, blas_int_t n, float* A, blas_int_t lda, float* w, float* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t liwork, blas_int_t* info ); void syevd( char jobz, char uplo, blas_int_t n, double* A, blas_int_t lda, double* w, double* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t liwork, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void syevd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // The real eigenvalues are returned in ascending order in the given vector \a w. \a w is resized // to the correct size (if possible and necessary). In case \a A is a row-major matrix, the left // eigenvectors are returned in the rows of \a A, in case \a A is a column-major matrix, the right // eigenvectors are returned in the columns of \a A. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... 
the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given \a jobz argument is neither \c 'V' nor \c 'N'; // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last function throws an // exception in case of an error. // // Via the following functions, which wrap the LAPACK functions \c ssyevx() and \c dsyevx(), it // is possible to compute a subset of eigenvalues and/or eigenvectors of a symmetric matrix: \code namespace blaze { void syevx( char jobz, char range, char uplo, blas_int_t n, float* A, blas_int_t lda, float vl, float vu, blas_int_t il, blas_int_t iu, float abstol, blas_int_t* m, float* w, float* Z, blas_int_t ldz, float* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t* ifail, blas_int_t* info ); void syevx( char jobz, char range, char uplo, blas_int_t n, double* A, blas_int_t lda, double vl, double vu, blas_int_t il, blas_int_t iu, double abstol, blas_int_t* m, double* w, double* Z, blas_int_t ldz, double* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t* ifail, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > size_t syevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo ); template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t syevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST upp ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 > size_t syevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2, typename ST > size_t syevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp ); } // namespace blaze \endcode // The number of eigenvalues to be computed is specified by the lower bound \c low and the upper // bound \c 
upp, which either form an integral or a floating point range. // // In case \a low and \a upp are of integral type, the function computes all eigenvalues in the // index range \f$[low..upp]\f$. The \a num resulting real eigenvalues are stored in ascending // order in the given vector \a w, which is either resized (if possible) or expected to be a // \a num-dimensional vector. The eigenvectors are returned in the rows of \a Z in case \a Z is // row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. \a Z is // resized (if possible) or expected to be a \a num-by-\a n row-major matrix or a \a n-by-\a num // column-major matrix. // // In case \a low and \a upp are of floating point type, the function computes all eigenvalues // in the half-open interval \f$(low..upp]\f$. The resulting real eigenvalues are stored in // ascending order in the given vector \a w, which is either resized (if possible) or expected // to be an \a n-dimensional vector. The eigenvectors are returned in the rows of \a Z in case // \a Z is a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. // \a Z is resized (if possible) or expected to be a \a n-by-\a n matrix. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a Z is a fixed size matrix and the dimensions don't match; // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last four functions throw // an exception in case of an error. 
// // // \n \subsection lapack_eigenvalues_hermitian Hermitian Matrices // // The following functions provide an interface for the LAPACK functions \c cheev() and \c zheev(), // which compute the eigenvalues and eigenvectors of the given Hermitian matrix: \code namespace blaze { void heev( char jobz, char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, float* w, complex<float>* work, blas_int_t lwork, float* rwork, blas_int_t* info ); void heev( char jobz, char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, double* w, complex<double>* work, blas_int_t lwork, double* rwork, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void heev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // Alternatively, the following functions can be used, which provide an interface to the LAPACK // functions \c cheevd() and \c zheevd(). In contrast to the \c heev() functions they use a // divide-and-conquer strategy for the computation of the left and right eigenvectors: \code namespace blaze { void heevd( char jobz, char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, float* w, complex<float>* work, blas_int_t lwork, float* rwork, blas_int_t lrwork, blas_int_t* iwork, blas_int_t* liwork, blas_int_t* info ); void heevd( char jobz, char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, double* w, complex<double>* work, blas_int_t lwork, double* rwork, blas_int_t lrwork, blas_int_t* iwork, blas_int_t* liwork, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void heevd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // The real eigenvalues are returned in ascending order in the given vector \a w. \a w is resized // to the correct size (if possible and necessary). 
In case \a A is a row-major matrix, the left // eigenvectors are returned in the rows of \a A, in case \a A is a column-major matrix, the right // eigenvectors are returned in the columns of \a A. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given \a jobz argument is neither \c 'V' nor \c 'N'; // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last function throws an // exception in case of an error. // // Via the following functions, which wrap the LAPACK functions \c cheevx() and \c zheevx(), it // is possible to compute a subset of eigenvalues and/or eigenvectors of an Hermitian matrix: \code namespace blaze { void heevx( char jobz, char range, char uplo, blas_int_t n, complex<float>* A, blas_int_t lda, float vl, float vu, blas_int_t il, blas_int_t iu, float abstol, blas_int_t* m, float* w, complex<float>* Z, blas_int_t ldz, complex<float>* work, blas_int_t lwork, float* rwork, blas_int_t* iwork, blas_int_t* ifail, blas_int_t* info ); void heevx( char jobz, char range, char uplo, blas_int_t n, complex<double>* A, blas_int_t lda, double vl, double vu, blas_int_t il, blas_int_t iu, double abstol, blas_int_t* m, double* w, complex<double>* Z, blas_int_t ldz, complex<double>* work, blas_int_t lwork, double* rwork, blas_int_t* iwork, blas_int_t* ifail, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > size_t heevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo ); template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t heevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST upp ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 > size_t heevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, 
DenseMatrix<MT2,SO2>& Z, char uplo ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2, typename ST > size_t heevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp ); } // namespace blaze \endcode // The number of eigenvalues to be computed is specified by the lower bound \c low and the upper // bound \c upp, which either form an integral or a floating point range. // // In case \a low and \a upp are of integral type, the function computes all eigenvalues in the // index range \f$[low..upp]\f$. The \a num resulting real eigenvalues are stored in ascending // order in the given vector \a w, which is either resized (if possible) or expected to be a // \a num-dimensional vector. The eigenvectors are returned in the rows of \a Z in case \a Z is // row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. \a Z is // resized (if possible) or expected to be a \a num-by-\a n row-major matrix or a \a n-by-\a num // column-major matrix. // // In case \a low and \a upp are of floating point type, the function computes all eigenvalues // in the half-open interval \f$(low..upp]\f$. The resulting real eigenvalues are stored in // ascending order in the given vector \a w, which is either resized (if possible) or expected // to be an \a n-dimensional vector. The eigenvectors are returned in the rows of \a Z in case // \a Z is a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. // \a Z is resized (if possible) or expected to be a \a n-by-\a n matrix. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a Z is a fixed size matrix and the dimensions don't match; // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. 
// // The first two functions report failure via the \c info argument, the last four functions throw // an exception in case of an error. // // // \n \section lapack_singular_values Singular Values/Singular Vectors // // The following functions provide an interface for the LAPACK functions \c sgesvd(), \c dgesvd(), // \c cgesvd(), and \c zgesvd(), which perform a singular value decomposition (SVD) on the given // general matrix: \code namespace blaze { void gesvd( char jobu, char jobv, blas_int_t m, blas_int_t n, float* A, blas_int_t lda, float* s, float* U, blas_int_t ldu, float* V, blas_int_t ldv, float* work, blas_int_t lwork, blas_int_t* info ); void gesvd( char jobu, char jobv, blas_int_t m, blas_int_t n, double* A, blas_int_t lda, double* s, double* U, blas_int_t ldu, double* V, blas_int_t ldv, double* work, blas_int_t lwork, blas_int_t* info ); void gesvd( char jobu, char jobv, blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, float* s, complex<float>* U, blas_int_t ldu, complex<float>* V, blas_int_t ldv, complex<float>* work, blas_int_t lwork, float* rwork, blas_int_t* info ); void gesvd( char jobu, char jobv, blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, double* s, complex<double>* U, blas_int_t ldu, complex<double>* V, blas_int_t ldv, complex<double>* work, blas_int_t lwork, double* rwork, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void gesvd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, char jobu, char jobv ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, char jobu, char jobv ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2 > void gesvd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, char jobu, char jobv ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 > void gesvd( DenseMatrix<MT1,SO>& A, 
DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobu, char jobv ); } // namespace blaze \endcode // Alternatively, the following functions can be used, which provide an interface to the LAPACK // functions \c sgesdd(), \c dgesdd(), \c cgesdd(), and \c zgesdd(). In contrast to the \c gesvd() // functions they compute the singular value decomposition (SVD) of the given general matrix by // applying a divide-and-conquer strategy for the computation of the left and right singular // vectors: \code namespace blaze { void gesdd( char jobz, blas_int_t m, blas_int_t n, float* A, blas_int_t lda, float* s, float* U, blas_int_t ldu, float* V, blas_int_t ldv, float* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t* info ); void gesdd( char jobz, blas_int_t m, blas_int_t n, double* A, blas_int_t lda, double* s, double* U, blas_int_t ldu, double* V, blas_int_t ldv, double* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t* info ); void gesdd( char jobz, blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, float* s, complex<float>* U, blas_int_t ldu, complex<float>* V, blas_int_t ldv, complex<float>* work, blas_int_t lwork, float* rwork, blas_int_t* iwork, blas_int_t* info ); void gesdd( char jobz, blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, double* s, complex<double>* U, blas_int_t ldu, complex<double>* V, blas_int_t ldv, complex<double>* work, blas_int_t lwork, double* rwork, blas_int_t* iwork, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > void gesdd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, char jobz ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void gesdd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, char jobz ); template< typename MT1, bool SO, typename MT2, typename VT, 
bool TF, typename MT3 > void gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobz ); } // namespace blaze \endcode // The resulting decomposition has the form \f[ A = U \cdot S \cdot V, \f] // where \a S is a \a m-by-\a n matrix, which is zero except for its min(\a m,\a n) diagonal // elements, \a U is an \a m-by-\a m orthogonal matrix, and \a V is a \a n-by-\a n orthogonal // matrix. The diagonal elements of \a S are the singular values of \a A, the first min(\a m,\a n) // columns of \a U and rows of \a V are the left and right singular vectors of \a A, respectively. // // The resulting min(\a m,\a n) real and non-negative singular values are returned in descending // order in the vector \a s, which is resized to the correct size (if possible and necessary). // // Via the following functions, which wrap the LAPACK functions \c sgesvdx(), \c dgesvdx(), // \c cgesvdx(), and \c zgesvdx(), it is possible to compute a subset of singular values and/or // vectors: \code namespace blaze { void gesvdx( char jobu, char jobv, char range, blas_int_t m, blas_int_t n, float* A, blas_int_t lda, float vl, float vu, blas_int_t il, blas_int_t iu, blas_int_t* ns, float* s, float* U, blas_int_t ldu, float* V, blas_int_t ldv, float* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t* info ); void gesvdx( char jobu, char jobv, char range, blas_int_t m, blas_int_t n, double* A, blas_int_t lda, double vl, double vu, blas_int_t il, blas_int_t iu, blas_int_t* ns, double* s, double* U, blas_int_t ldu, double* V, blas_int_t ldv, double* work, blas_int_t lwork, blas_int_t* iwork, blas_int_t* info ); void gesvdx( char jobu, char jobv, char range, blas_int_t m, blas_int_t n, complex<float>* A, blas_int_t lda, float vl, float vu, blas_int_t il, blas_int_t iu, blas_int_t* ns, float* s, complex<float>* U, blas_int_t ldu, complex<float>* V, blas_int_t ldv, complex<float>* work, blas_int_t lwork, float* rwork, blas_int_t* iwork, 
blas_int_t* info ); void gesvdx( char jobu, char jobv, char range, blas_int_t m, blas_int_t n, complex<double>* A, blas_int_t lda, double vl, double vu, blas_int_t il, blas_int_t iu, blas_int_t* ns, double* s, complex<double>* U, blas_int_t ldu, complex<double>* V, blas_int_t ldv, complex<double>* work, blas_int_t lwork, double* rwork, blas_int_t* iwork, blas_int_t* info ); template< typename MT, bool SO, typename VT, bool TF > size_t gesvdx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s ); template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t gesvdx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, ST low, ST upp ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, ST low, ST upp ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2 > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, ST low, ST upp ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3, typename ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, ST low, ST upp ); } // namespace blaze \endcode // The number of singular values to be computed is specified by the lower bound \a low and the // upper bound \a upp, which either form an integral or a floating point range. 
// // In case \a low and \a upp are of integral type, the function computes all singular values // in the index range \f$[low..upp]\f$. The \a num resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a \a num-dimensional vector. The resulting left singular vectors are stored // in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-\a num matrix. The resulting right singular vectors are stored in the given matrix \a V, // which is either resized (if possible) or expected to be a \a num-by-\a n matrix. // // In case \a low and \a upp are of floating point type, the function computes all singular values // in the half-open interval \f$(low..upp]\f$. The resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a min(\a m,\a n)-dimensional vector. The resulting left singular vectors are // stored in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-min(\a m,\a n) matrix. The resulting right singular vectors are stored in the given // matrix \a V, which is either resized (if possible) or expected to be a min(\a m,\a n)-by-\a n // matrix. // // The functions fail if ... // // - ... the given matrix \a U is a fixed size matrix and the dimensions don't match; // - ... the given vector \a s is a fixed size vector and the size doesn't match; // - ... the given matrix \a V is a fixed size matrix and the dimensions don't match; // - ... the given scalar values don't form a proper range; // - ... the singular value decomposition fails. // // The first four functions report failure via the \c info argument, the remaining functions throw // an exception in case of an error. 
// // // \n Previous: \ref blas_functions &nbsp; &nbsp; Next: \ref block_vectors_and_matrices \n */ //************************************************************************************************* //**Block Vectors and Matrices********************************************************************* /*!\page block_vectors_and_matrices Block Vectors and Matrices // // \tableofcontents // // // \n \section block_vectors_and_matrices_general General Concepts // <hr> // // In addition to fundamental element types, the \b Blaze library supports vectors and matrices // with non-fundamental element type. For instance, it is possible to define block matrices by // using a matrix type as the element type: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix< DynamicMatrix<double,rowMajor>, rowMajor > A; DynamicVector< DynamicVector<double,columnVector >, columnVector > x, y; // ... Resizing and initialization y = A * x; \endcode // The matrix/vector multiplication in this example runs fully parallel and uses vectorization // for every inner matrix/vector multiplication and vector addition. // // // \n \section block_vectors_and_matrices_pitfalls Pitfalls // <hr> // // The only thing to keep in mind when using non-fundamental element types is that all operations // between the elements have to be well defined. More specifically, the size of vector and matrix // elements has to match. The attempt to combine two non-matching elements results in either a // compilation error (in case of statically sized elements) or an exception (for dynamically sized // elements): \code DynamicVector< StaticVector<int,2UL> > a; DynamicVector< StaticVector<int,3UL> > b; DynamicVector< DynamicVector<int> > c( a + b ); // Compilation error: element size doesn't match \endcode // Therefore please don't forget that dynamically sized elements (e.g. 
\c blaze::DynamicVector, // \c blaze::HybridVector, \c blaze::DynamicMatrix, \c blaze::HybridMatrix, ...) need to be sized // accordingly upfront. // // // \n \section block_vectors_and_matrices_examples Examples // <hr> // // The first example demonstrates the multiplication between a statically sized block matrix // and a block vector: \code using namespace blaze; // ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) ) // ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) ) // ( ) * ( ) = ( ) // ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) ) // ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) ) using M2x2 = StaticMatrix<int,2UL,2UL,rowMajor>; using V2 = StaticVector<int,2UL,columnVector>; DynamicMatrix<M2x2,rowMajor> A{ { M2x2(1), M2x2(2) }, { M2x2(3), M2x2(4) } }; DynamicVector<V2,columnVector> x{ V2(1), V2(2) }; DynamicVector<V2,columnVector> y( A * x ); \endcode // The second example shows the multiplication between a compressed block matrix with blocks of // varying size and a compressed block vector: \code using namespace blaze; // ( ( 1 -2 3 ) ( 5 -1 ) ) ( ( 1 ) ) ( ( -3 ) ) // ( ( 4 1 0 ) ( 1 2 ) ) ( ( 0 ) ) ( ( 7 ) ) // ( ( 0 2 4 ) ( 3 1 ) ) ( ( 1 ) ) ( ( 3 ) ) // ( ) ( ) ( ) // ( ( 1 ) ) * ( ( 2 ) ) = ( ( 2 ) ) // ( ) ( ) ( ) // ( ( 0 -1 1 ) ( 1 0 ) ) ( ( -1 ) ) ( ( 0 ) ) // ( ( 2 -1 2 ) ( 0 1 ) ) ( ( 2 ) ) ( ( 6 ) ) using M3x3 = HybridMatrix<int,3UL,3UL,rowMajor>; using V3 = HybridVector<int,3UL,columnVector>; CompressedMatrix<M3x3,rowMajor> A( 3UL, 3UL, 5UL ); A(0,0) = M3x3{ { 1, -2, 3 }, { 4, 1, 0 }, { 0, 2, 4 } }; A(0,2) = M3x3{ { 5, -1 }, { 1, 2 }, { 3, 1 } }; A(1,1) = M3x3{ { 1 } }; A(2,0) = M3x3{ { 0, -1, 1 }, { 2, -1, 2 } }; A(2,2) = M3x3{ { 1, 0 }, { 0, 1 } }; CompressedVector<V3,columnVector> x( 3UL, 3UL ); x[0] = V3{ 1, 0, 1 }; x[1] = V3{ 2 }; x[2] = V3{ -1, 2 }; CompressedVector<V3,columnVector> y( A * x ); \endcode // \n Previous: \ref lapack_functions &nbsp; &nbsp; Next: \ref intra_statement_optimization \n */ 
//************************************************************************************************* //**Intra-Statement Optimization******************************************************************* /*!\page intra_statement_optimization Intra-Statement Optimization // // One of the prime features of the \b Blaze library is the automatic intra-statement optimization. // In order to optimize the overall performance of every single statement \b Blaze attempts to // rearrange the operands based on their types. For instance, the following addition of dense and // sparse vectors \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + s1 + d2; \endcode // is automatically rearranged and evaluated as \code // ... d3 = d1 + d2 + s1; // <- Note that s1 and d2 have been rearranged \endcode // This order of operands is highly favorable for the overall performance since the addition of // the two dense vectors \c d1 and \c d2 can be handled much more efficiently in a vectorized // fashion. // // This intra-statement optimization can have a tremendous effect on the performance of a statement. // Consider for instance the following computation: \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; // ... Resizing and initialization y = A * B * x; \endcode // Since multiplications are evaluated from left to right, this statement would result in a // matrix/matrix multiplication, followed by a matrix/vector multiplication. However, if the // right subexpression is evaluated first, the performance can be dramatically improved since the // matrix/matrix multiplication can be avoided in favor of a second matrix/vector multiplication. // The \b Blaze library exploits this by automatically restructuring the expression such that the // right multiplication is evaluated first: \code // ... 
y = A * ( B * x ); \endcode // Note however that although this intra-statement optimization may result in a measurable or // even significant performance improvement, this behavior may be undesirable for several reasons, // for instance because of numerical stability. Therefore, in case the order of evaluation matters, // the best solution is to be explicit and to separate a statement into several statements: \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + s1; // Compute the dense vector/sparse vector addition first ... d3 += d2; // ... and afterwards add the second dense vector \endcode \code // ... blaze::DynamicMatrix<double> A, B, C; blaze::DynamicVector<double> x, y; // ... Resizing and initialization C = A * B; // Compute the left-hand side matrix-matrix multiplication first ... y = C * x; // ... before the right-hand side matrix-vector multiplication \endcode // Alternatively, it is also possible to use the \c eval() function to fix the order of evaluation: \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + eval( s1 + d2 ); \endcode \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; // ... Resizing and initialization y = eval( A * B ) * x; \endcode // \n Previous: \ref block_vectors_and_matrices &nbsp; &nbsp; Next: \ref faq \n */ //************************************************************************************************* //**FAQ******************************************************************************************** /*!\page faq Frequently Asked Questions (FAQ) // // \tableofcontents // // // <hr> // \section faq_padding A StaticVector/StaticMatrix is larger than expected. Is this a bug? 
// // The size of a \ref vector_types_static_vector, \ref matrix_types_static_matrix, // \ref vector_types_hybrid_vector, or \ref matrix_types_hybrid_matrix can indeed be larger // than expected: \code StaticVector<int,3> a; StaticMatrix<int,3,3> A; sizeof( a ); // Evaluates to 16, 32, or even 64, but not 12 sizeof( A ); // Evaluates to 48, 96, or even 144, but not 36 \endcode // In order to achieve the maximum possible performance the \b Blaze library tries to enable // SIMD vectorization even for small vectors. For that reason \b Blaze by default uses padding // elements for all dense vectors and matrices to guarantee that at least a single SIMD vector // can be loaded. Depending on the used SIMD technology that can significantly increase the size // of a \ref vector_types_static_vector, \ref matrix_types_static_matrix, // \ref vector_types_hybrid_vector, or \ref matrix_types_hybrid_matrix : \code StaticVector<int,3> a; StaticMatrix<int,3,3> A; sizeof( a ); // Evaluates to 16 in case of SSE, 32 in case of AVX, and 64 in case of AVX-512 // (under the assumption that an integer occupies 4 bytes) sizeof( A ); // Evaluates to 48 in case of SSE, 96 in case of AVX, and 144 in case of AVX-512 // (under the assumption that an integer occupies 4 bytes) \endcode // The configuration file <tt>./blaze/config/Padding.h</tt> provides a compile time switch // that can be used to (de-)activate padding: \code #define BLAZE_DEFAULT_PADDING_FLAG blaze::padded \endcode // Alternatively it is possible to (de-)activate padding via command line or by defining this // symbol manually before including any \b Blaze header file: \code g++ ... -DBLAZE_DEFAULT_PADDING_FLAG=blaze::padded ... 
\endcode \code #define BLAZE_DEFAULT_PADDING_FLAG blaze::padded #include <blaze/Blaze.h> \endcode // If \c BLAZE_DEFAULT_PADDING_FLAG is set to \c blaze::padded, by default padding is enabled for // \ref vector_types_static_vector, \ref vector_types_hybrid_vector, \ref matrix_types_static_matrix, // and \ref matrix_types_hybrid_matrix. If it is set to \c blaze::unpadded, then padding is by // default disabled. Note however that disabling padding can considerably reduce the performance // of all dense vector and matrix operations! // // // <hr> // \section faq_alignment Despite disabling padding, a StaticVector/StaticMatrix is still larger than expected. Is this a bug? // // Despite disabling padding via the \c BLAZE_DEFAULT_PADDING_FLAG compile time switch (see // \ref faq_padding), the size of a \ref vector_types_static_vector, \ref matrix_types_static_matrix, // \ref vector_types_hybrid_vector, or \ref matrix_types_hybrid_matrix can still be larger than // expected: \code #define BLAZE_DEFAULT_PADDING_FLAG blaze::unpadded #include <blaze/Blaze.h> StaticVector<int,3> a; StaticVector<int,5> b; sizeof( a ); // Always evaluates to 12 sizeof( b ); // Evaluates to 32 with SSE (larger than expected) and to 20 with AVX or AVX-512 (expected) \endcode // The reason for this behavior is the used SIMD technology. If SSE is used, which provides 128 // bit wide registers, a single SIMD pack can usually hold 4 integers (128 bit divided by 32 bit). // Since the second vector contains enough elements, it is possible to benefit from vectorization. // However, SSE requires an alignment of 16 bytes, which ultimately results in a total size of // 32 bytes for the \c StaticVector (2 times 16 bytes due to 5 integer elements). If AVX or AVX-512 // is used, which provide 256 bit or 512 bit wide registers, a single SIMD vector can hold 8 or 16 // integers, respectively. 
Even the second vector does not hold enough elements to benefit from // vectorization, which is why \b Blaze does not enforce a 32 byte (for AVX) or even 64 byte // alignment (for AVX-512). // // It is possible to disable the SIMD-specific alignment for \ref vector_types_static_vector, // \ref matrix_types_static_matrix, \ref vector_types_hybrid_vector, or \ref matrix_types_hybrid_matrix // via the compile time switch in the <tt>./blaze/config/Alignment.h</tt> configuration file: \code #define BLAZE_DEFAULT_ALIGNMENT_FLAG blaze::aligned \endcode // Alternatively it is possible to set the default alignment flag via command line or by defining // this symbol manually before including any \b Blaze header file: \code g++ ... -DBLAZE_DEFAULT_ALIGNMENT_FLAG=blaze::aligned ... \endcode \code #define BLAZE_DEFAULT_ALIGNMENT_FLAG blaze::aligned #include <blaze/Blaze.h> \endcode // If \c BLAZE_DEFAULT_ALIGNMENT_FLAG is set to \c blaze::aligned then \ref vector_types_static_vector, // \ref vector_types_hybrid_vector, \ref matrix_types_static_matrix, and \ref matrix_types_hybrid_matrix // use aligned memory by default. If it is set to \c blaze::unaligned they don't enforce aligned // memory. Note however that disabling alignment can considerably reduce the performance of all // operations with these vector and matrix types! // // Alternatively it is possible to disable the vectorization entirely by the compile time switch // in the <tt>./blaze/config/Vectorization.h</tt> configuration file: \code #define BLAZE_USE_VECTORIZATION 1 \endcode // It is also possible to (de-)activate vectorization via command line or by defining this symbol // manually before including any \b Blaze header file: \code g++ ... -DBLAZE_USE_VECTORIZATION=1 ... 
\endcode \code #define BLAZE_USE_VECTORIZATION 1 #include <blaze/Blaze.h> \endcode // In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed // to use intrinsics and the necessary alignment to speed up computations. In case the switch is // set to 0, vectorization is disabled entirely and the \b Blaze library chooses default, // non-vectorized functionality for the operations. Note that deactivating the vectorization may // pose a severe performance limitation for a large number of operations! // // // <hr> // \section faq_std_vector I experience crashes when using StaticVector/StaticMatrix in a std::vector. Is this a bug? // // With active vectorization the elements of a \ref vector_types_static_vector, // \ref vector_types_hybrid_vector, \ref matrix_types_static_matrix, and \ref matrix_types_hybrid_matrix // are possibly over-aligned to meet the alignment requirements of the available instruction set // (SSE, AVX, AVX-512, ...). The alignment for fundamental types (\c short, \c int, \c float, // \c double, ...) and complex types (\c complex<float>, \c complex<double>, ...) is 16 bytes // for SSE, 32 bytes for AVX, and 64 bytes for AVX-512. 
All other types are aligned according to // their intrinsic alignment: \code struct Int { int i; }; using VT1 = blaze::StaticVector<double,3UL>; using VT2 = blaze::StaticVector<complex<float>,2UL>; using VT3 = blaze::StaticVector<Int,5UL>; alignof( VT1 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512 alignof( VT2 ); // Evaluates to 16 for SSE, 32 for AVX, and 64 for AVX-512 alignof( VT3 ); // Evaluates to 'alignof( Int )' \endcode // For this reason \ref vector_types_static_vector, \ref vector_types_hybrid_vector, // \ref matrix_types_static_matrix, and \ref matrix_types_hybrid_matrix cannot be used in // containers using dynamic memory such as \c std::vector without additionally providing an // allocator that can provide over-aligned memory: \code using Type = blaze::StaticVector<double,3UL>; using Allocator = blaze::AlignedAllocator<Type>; std::vector<Type> v1; // Might be misaligned for AVX or AVX-512 std::vector<Type,Allocator> v2; // Properly aligned for AVX or AVX-512 \endcode // It is possible to disable the vectorization entirely by the compile time switch in the // <tt>./blaze/config/Vectorization.h</tt> configuration file: \code #define BLAZE_USE_VECTORIZATION 1 \endcode // It is also possible to (de-)activate vectorization via command line or by defining this symbol // manually before including any \b Blaze header file: \code g++ ... -DBLAZE_USE_VECTORIZATION=1 ... \endcode \code #define BLAZE_USE_VECTORIZATION 1 #include <blaze/Blaze.h> \endcode // In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed // to use intrinsics and the necessary alignment to speed up computations. In case the switch is // set to 0, vectorization is disabled entirely and the \b Blaze library chooses default, // non-vectorized functionality for the operations. Note that deactivating the vectorization may // pose a severe performance limitation for a large number of operations! 
// // // <hr> // \section faq_blas To which extent does Blaze make use of BLAS functions under the hood? // // Currently the only BLAS functions that are utilized by \b Blaze are the \c gemm() functions // for the multiplication of two dense matrices (i.e. \c sgemm(), \c dgemm(), \c cgemm(), and // \c zgemm()). All other operations are always and unconditionally performed by native \b Blaze // kernels. // // The \c BLAZE_BLAS_MODE config switch (see <tt>./blaze/config/BLAS.h</tt>) determines whether // \b Blaze is allowed to use BLAS kernels. If \c BLAZE_BLAS_MODE is set to 0 then \b Blaze // does not utilize the BLAS kernels and unconditionally uses its own custom kernels. If // \c BLAZE_BLAS_MODE is set to 1 then \b Blaze is allowed to choose between using BLAS kernels // or its own custom kernels. In case of the dense matrix multiplication this decision is based // on the size of the dense matrices. For large matrices, \b Blaze uses the BLAS kernels, for // small matrices it uses its own custom kernels. The threshold for this decision can be // configured via the \c BLAZE_DMATDMATMULT_THRESHOLD, \c BLAZE_DMATTDMATMULT_THRESHOLD, // \c BLAZE_TDMATDMATMULT_THRESHOLD and \c BLAZE_TDMATTDMATMULT_THRESHOLD config switches // (see <tt>./blaze/config/Thresholds.h</tt>). // // Please note that the extent to which \b Blaze uses BLAS kernels can change in future releases // of \b Blaze! // // // <hr> // \section faq_lapack To which extent does Blaze make use of LAPACK functions under the hood? // // \b Blaze uses LAPACK functions for matrix decomposition, matrix inversion, computing the // determinants and eigenvalues, and the SVD. In contrast to the BLAS functionality (see // \ref faq_blas), you cannot disable LAPACK or switch to custom kernels. In case you try to // use any of these functionalities, but do not provide (i.e. link) a LAPACK library you will // get link time errors. 
// // Please note that the extent to which \b Blaze uses LAPACK kernels can change in future releases // of \b Blaze! // // // <hr> // \section faq_sparse_matrix_setup What is the fastest way to set up a very large sparse matrix? // // The following examples give an overview of different approaches to set up a sparse, row-major NxN // matrix with the following pattern, where all values on the diagonal and the two sub-diagonals // are filled: \f[\left(\begin{array}{*{9}{c}} 1 & 1 & 0 & 0 & 0 & \cdots & 0 & 0 & 0 \\ 1 & 1 & 1 & 0 & 0 & \cdots & 0 & 0 & 0 \\ 0 & 1 & 1 & 1 & 0 & \cdots & 0 & 0 & 0 \\ 0 & 0 & 1 & 1 & 1 & \cdots & 0 & 0 & 0 \\ 0 & 0 & 0 & 1 & 1 & \cdots & 0 & 0 & 0 \\ \vdots & \vdots & \vdots & \vdots & \vdots & \ddots & \vdots & \vdots & \vdots \\ 0 & 0 & 0 & 0 & 0 & \cdots & 1 & 1 & 0 \\ 0 & 0 & 0 & 0 & 0 & \cdots & 1 & 1 & 1 \\ 0 & 0 & 0 & 0 & 0 & \cdots & 0 & 1 & 1 \\ \end{array}\right)\f] // Special emphasis is given to the runtime until the matrix setup is complete. In all cases the // runtime is benchmarked with Clang-9.0 (compilation flags \c -O2 and \c -DNDEBUG) for \c N=200000. // // // <b>Approach 1: Using the function call operator</b> // // In this approach the function call operator (i.e. \c operator()) is used to insert the corresponding // elements into the matrix: \code blaze::CompressedMatrix<int,rowMajor> A( N, N ); A.reserve( N*3UL-2UL ); // Optional: Reserve capacity for all elements upfront for( size_t i=0; i<N; ++i ) { const size_t jbegin( i == 0UL ? 0UL : i-1UL ); const size_t jend ( i == N-1UL ? N-1UL : i+1UL ); for( size_t j=jbegin; j<=jend; ++j ) { A(i,j) = 1; } } \endcode // This approach is the most general and convenient, but also the slowest of all (approx. \b 64 // seconds). With every call to \c operator(), a new element is inserted at the specified position. // This implies shifting all subsequent elements and adapting every subsequent row. 
Since all // non-zero elements are stored in a single array inside a \c CompressedMatrix, this approach is // similar to inserting elements at the front of a \c std::vector; all subsequent elements have // to be shifted. // // // <b>Approach 2: Rowwise reserve and insert</b> // // The next approach performs a rowwise reservation of capacity: \code blaze::CompressedMatrix<int,rowMajor> A( N, N ); A.reserve( N*3UL ); // Allocate the total amount of memory A.reserve( 0UL, 2UL ); // Reserve a capacity of 2 for row 0 for( size_t i=1; i<N-1UL; ++i ) { A.reserve( i, 3UL ); // Reserve a capacity of 3 for row i } A.reserve( N-1UL, 2UL ); // Reserve a capacity of 2 for the last row for( size_t i=0; i<N; ++i ) { const size_t jbegin( i == 0UL ? 0UL : i-1UL ); const size_t jend ( i == N-1UL ? N-1UL : i+1UL ); for( size_t j=jbegin; j<=jend; ++j ) { A.insert( i, j, 1 ); } } \endcode // The first call to reserve() performs the memory allocation for the entire matrix. The complete // matrix now holds the entire capacity, but each single row has a capacity of 0. Therefore the // subsequent calls to \c reserve() divide the existing capacity to all rows. // // Unfortunately, also this approach is rather slow. The runtime is approx. \b 30 seconds. The // downside of this approach is that changing the capacity of a single row causes a change in // all following rows. Therefore this approach is similar to the first approach. // // // <b>Approach 3: reserve/append/finalize</b> // // As the wiki explains, the most efficient way to fill a sparse matrix is a combination of // \c reserve(), \c append() and \c finalize(): \code CompressedMatrix<int,rowMajor> A( N, N ); A.reserve( N*3UL ); for( size_t i=0; i<N; ++i ) { const size_t jbegin( i == 0UL ? 0UL : i-1UL ); const size_t jend ( i == N-1UL ? 
N-1UL : i+1UL ); for( size_t j=jbegin; j<=jend; ++j ) { A.append( i, j, 1 ); } A.finalize( i ); } \endcode // The initial call to \c reserve() allocates enough memory for all non-zero elements of the // entire matrix. \c append() and \c finalize() are then used to insert the elements and to mark // the end of each single row. This is a very low-level approach and very similar to writing to // an array manually, which results in a mere \b 0.026 seconds. The \c append() function writes // the new element to the next memory location, and at the end of each row or column the // \c finalize() function sets the internal pointers accordingly. It is very important to note // that the \c finalize() function has to be explicitly called for each row, even for empty ones! // Else the internal data structure will be corrupt! Also note that although \c append() does not // allocate new memory, it still invalidates all iterators returned by the \c end() functions! // // // <b>Approach 4: Reservation via the constructor</b> // // In case the number of non-zero elements is known upfront, it is also possible to perform the // reservation via the constructor of \c CompressedMatrix. For that purpose \c CompressedMatrix // provides a constructor taking a \c std::vector<size_t>: \code std::vector<size_t> nonzeros( N, 3UL ); // Create a vector of N elements with value 3 nonzeros[ 0] = 2UL; // We need only 2 elements in the first row ... nonzeros[N-1] = 2UL; // ... and last row. CompressedMatrix<int,rowMajor> A( N, N, nonzeros ); //std::cerr << " Inserting values...\n"; for( size_t i=0; i<N; ++i ) { const size_t jbegin( i == 0UL ? 0UL : i-1UL ); const size_t jend ( i == N-1UL ? N-1UL : i+1UL ); for( size_t j=jbegin; j<=jend; ++j ) { A.insert( i, j, 1 ); } } \endcode // The runtime for this approach is \b 0.027 seconds. // // // <hr> // \section faq_compile_times The compile time is too high if I include <blaze/Blaze.h>. Can I reduce it? 
// // The include file <tt><blaze/Blaze.h></tt> includes the entire functionality of the \b Blaze // library, which by now is several hundred thousand lines of source code. That means that a lot // of source code has to be parsed whenever <tt><blaze/Blaze.h></tt> is encountered. However, it // is rare that everything is required within a single compilation unit. Therefore it is easily // possible to reduce compile times by including only those \b Blaze features that are used within // the compilation unit. For instance, instead of including <tt><blaze/Blaze.h></tt> it could be // enough to include <tt><blaze/math/DynamicVector.h></tt>, which would reduce the compilation // times by about 20%. // // Additionally we are taking care to implement new \b Blaze functionality such that compile times // do not explode and try to reduce the compile times of existing features. Thus newer releases of // \b Blaze can also improve compile times. // // // <hr> // \section faq_custom_operations Blaze does not provide feature XYZ. What can I do? // // In some cases you might be able to implement the required functionality very conveniently by // building on the existing \c map() functions (see \ref custom_operations_map). For instance, // the following code demonstrates the addition of a function that merges two vectors of floating // point type into a vector of complex numbers: \code template< typename VT1, typename VT2, bool TF > decltype(auto) zip( const blaze::DenseVector<VT1,TF>& lhs, const blaze::DenseVector<VT2,TF>& rhs ) { return blaze::map( ~lhs, ~rhs, []( const auto& r, const auto& i ) { using ET1 = ElementType_t<VT1>; using ET2 = ElementType_t<VT2>; return std::complex<std::common_type_t<ET1,ET2>>( r, i ); } ); } \endcode // You will find a summary of the necessary steps to create custom features in \ref customization. // // Sometimes, however, the available customization points might not be sufficient. 
In this case // you are cordially invited to create a pull request that provides the implementation of a // feature or to create an issue according to our \ref issue_creation_guidelines. Please try // to explain the feature as descriptive as possible, for instance by providing conceptual code // examples. // // \n Previous: \ref intra_statement_optimization &nbsp; &nbsp; Next: \ref issue_creation_guidelines \n */ //************************************************************************************************* //**FAQ******************************************************************************************** /*!\page issue_creation_guidelines Issue Creation Guidelines // // \tableofcontents // // // One of the most important aspects of the \b Blaze project is the // <a href="https://bitbucket.org/blaze-lib/blaze/issues">issue management</a> on the official // \b Blaze Bitbucket page. We cordially invite all \b Blaze users to submit feature requests // and bug reports, as we believe that this is a significant part of making \b Blaze a better // library. However, we are asking to follow a small set of guidelines when creating an issue // to facilitate the issue management on our side and also to make issues more useful for users // of \b Blaze. // // // <hr> // \section issues_title Title // // The title is the most important detail of an issue. A well chosen title makes it easy to grasp // the idea of an issue and improves the discoverability. Therefore, please choose a title that // is ... // // - ... as descriptive as possible; // - ... as concise as possible; // - ... as unambiguous as possible. // // Also, please create a separate issue for each idea/problem/etc. A very general title or an // \"and\" in the title could be an indication that the issue is not specific enough and should // be split into several issues. 
// // \subsection issues_title_good_examples Good Examples // // - \"Provide support for AVX-512 SIMD operations\" // - \"Add support for the Boost Multiprecision Library\" // - \"Introduce reduction operations into Blaze\" // - \"Compilation error on KNL with -march=knl\" // // \subsection issues_title_bad_examples Bad Examples // // - \"Several requests\" (instead create separate issues for each single request) // - \"Improve the performance\" (instead specify which operation should perform better) // - \"Blaze library compilation error\" (instead try to be more specific) // // // <hr> // \section issues_description Description // // The description should help us to understand your idea or problem in as much detail as possible. // Also, it helps to clearly spell out your expectations (how a feature is supposed to work, how // the behavior should be, etc.). Please spend a couple of minutes to try to make the description // as comprehensive as possible. // // // <hr> // \section issues_assignee Assignee // // There is no need to assign the issue to a particular person. It is perfectly ok if you just // ignore this setting. // // // <hr> // \section issues_kind Kind of Issue // // There are four kinds of issues available in the Bitbucket issue tracker: \ref issues_kind_bug, // \ref issues_kind_enhancement, \ref issues_kind_proposal, and \ref issues_kind_task. In the // following we try to give guidelines on which kind to choose for a particular issue: // // \subsection issues_kind_bug Bug // // Please choose the category \ref issues_kind_bug if ... // // - ... you experience a compilation error despite your best efforts to get it right; // - ... you experience a crash/failure despite your best efforts to get it right; // - ... you experience problems when combining features; // - ... a feature does not work as specified/documented (i.e. can be considered broken). // // Please \b don't choose the category \ref issues_kind_bug if ... // // - ... 
you feel a feature should work differently than it currently does (instead create a // \ref issues_kind_proposal with a convincing title and description); // - ... you are not sure how to use a feature (instead create an \ref issues_kind_enhancement // issue to extend the documentation); // - ... you are missing a feature (instead create a \ref issues_kind_proposal or // \ref issues_kind_enhancement issue). // // If you select the category \ref issues_kind_bug, please also try to provide a minimum example // that fails. That helps us to minimize the time to resolve the bug. // // As we try to keep \b Blaze bug-free, we will always prioritize bug issues. However, we will // also quickly close bug issues as \"wontfix\" if the described issue is not a bug (i.e. one of // the problems mentioned above). We will \b not relabel a bug issue to \ref issues_kind_enhancement // or \ref issues_kind_proposal, even if they would be reasonable extensions to \b Blaze. // // \subsection issues_kind_enhancement Enhancement // // Please choose the category \ref issues_kind_enhancement if ... // // - ... you need an add-on to an existing feature; // - ... you need an extension of an existing feature; // - ... you need an extended documentation for an existing feature. // // \ref issues_kind_enhancement is very similar to \ref issues_kind_proposal, so we don't mind // if an \ref issues_kind_enhancement is labeled as a \ref issues_kind_proposal or vice versa. // Just make sure you don't request an extension or new feature as a \ref issues_kind_bug. // // \subsection issues_kind_proposal Proposal // // Please choose the category \ref issues_kind_proposal if ... // // - ... you want to request a new feature; // - ... you want to change an existing feature. // // \ref issues_kind_proposal is very similar to \ref issues_kind_enhancement, so we don't mind if // a \ref issues_kind_proposal is labeled as an \ref issues_kind_enhancement or vice versa. 
Just // make sure you don't request an extension or new feature as a \ref issues_kind_bug. // // \subsection issues_kind_task Task // // Please choose the category \ref issues_kind_task if ... // // - ... you want us to do something not feature related; // - ... you have something else in mind which does not fall in the other three categories. // // // <hr> // \section issues_priority Priority // // Via the priority of an issue you can tell us how important the issue is to you. Therefore the // priority can have an influence on when we will deal with the issue. However, unfortunately we // don't have an infinite amount of time and we can not deal with an arbitrary amount of issues // at the same time. We will therefore take the priority into account, but mainly schedule the // issues based on impact to all \b Blaze users and the estimated time to resolve it. // // You can choose between \ref issues_priority_blocker, \ref issues_priority_critical, // \ref issues_priority_major, \ref issues_priority_minor, and \ref issues_priority_trivial. // // \subsection issues_priority_blocker Blocker // // Please choose a \ref issues_priority_blocker priority if ... // // - ... you cannot work with \b Blaze due to the described \ref issues_kind_bug; // - ... the \ref issues_kind_bug likely has an influence on \b all \b Blaze users. // // Please note that the categories \ref issues_kind_enhancement or \ref issues_kind_proposal // should never be a \ref issues_priority_blocker! // // \subsection issues_priority_critical Critical // // Please choose a \ref issues_priority_critical priority if ... // // - ... you can work around a \ref issues_kind_bug, but the workaround is (much) slower or awful; // - ... you cannot use \b Blaze without the proposed feature; // - ... you consider it to be essential for \b all \b Blaze users. // // \subsection issues_priority_major Major // // Please choose a \ref issues_priority_major priority if ... // // - ... 
a \ref issues_kind_bug or feature request is not \ref issues_priority_critical, but // still very important to you; // - ... you consider it to have a \ref issues_priority_major impact on most \b Blaze users. // // The \ref issues_priority_major category is the default setting in Bitbucket and we therefore // consider it as the default priority for issues. // // \subsection issues_priority_minor Minor // // Please choose a \ref issues_priority_minor priority if ... // // - ... a \ref issues_kind_bug does not affect many \b Blaze users; // - ... a feature request would only be useful for a small number of \b Blaze users; // - ... a feature would be nice to have, but is not particularly important. // // \subsection issues_priority_trivial Trivial // // Please choose a \ref issues_priority_trivial priority if ... // // - ... a \ref issues_kind_bug hardly affects anyone; // - ... a feature request would only be useful for very few \b Blaze users; // - ... the expected time to resolve an issue is very small. // // // <hr> // \section issues_attachment Attachments // // You can always provide us with additional information in the form of attachments. Feel free // to attach something to the issue if ... // // - ... it can help us to analyze a \ref issues_kind_bug; // - ... you have some source code that demonstrates a problem; // - ... you already have a working prototype that sketches the idea; // - ... you have additional resources that could help us. // // We appreciate anything that simplifies our work and speeds up our progress. 
// // \n Previous: \ref faq &nbsp; &nbsp; Next: \ref blaze_references \n */ //************************************************************************************************* //**Blaze References******************************************************************************* /*!\page blaze_references Blaze References // // In case you need references to the \b Blaze library (for papers or other publications), please // feel free to use one of the following references: \code @misc{blazelib, author = "Klaus {Iglberger}", title = "Blaze C++ Linear Algebra Library", howpublished = "https://bitbucket.org/blaze-lib", year = 2012 } \endcode \code @article{iglberger2012_1, author = "Klaus {Iglberger} and Georg {Hager} and Jan {Treibig} and Ulrich {R{\"u}de}", title = "Expression Templates Revisited: A Performance Analysis of Current Methodologies", journal = "SIAM Journal on Scientific Computing", year = 2012, volume = 34(2), pages = C42--C69 } \endcode \code @inproceedings{iglberger2012_2, author = "Klaus {Iglberger} and Georg {Hager} and Jan {Treibig} and Ulrich {R{\"u}de}", title = "High Performance Smart Expression Template Math Libraries", booktitle = "Proceedings of the 2nd International Workshop on New Algorithms and Programming Models for the Manycore Era (APMM 2012) at HPCS 2012", year = 2012 } \endcode // \n Previous: \ref issue_creation_guidelines */ //************************************************************************************************* #endif
trmv_c_csr_u_lo_conj.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <memory.h>
#include <stdlib.h>

/*
 * OpenMP kernel for a triangular sparse matrix-vector product on a CSR
 * matrix ("trmv"), variant: Unit diagonal, Lower triangle, COnJugated values
 * (per the _u_lo_conj naming).
 *
 * Computed result (from the code below):
 *   y[i]  = beta * y[i] + alpha * x[i]                     (unit-diagonal term)
 *   y[col] += alpha * conj(A(i,col)) * x[i]  for every stored entry with
 *             col < i (strictly lower triangle), i.e. the accumulation is
 *             column-wise / transposed relative to the CSR row traversal.
 *
 * Requires a square matrix; returns ALPHA_SPARSE_STATUS_INVALID_VALUE otherwise.
 *
 * Race avoidance: each thread scatters into its own dense buffer
 * y_local[tid][], and a final row-parallel pass reduces the per-thread
 * buffers into y.
 */
static alphasparse_status_t trmv_x_csr_u_lo_conj_omp(const ALPHA_Number alpha,
                                                     const ALPHA_SPMAT_CSR *A,
                                                     const ALPHA_Number *x,
                                                     const ALPHA_Number beta,
                                                     ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    /* Triangular solve/product only makes sense for square matrices. */
    if(m != n)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;
    ALPHA_INT num_threads = alpha_get_thread_num();

    /* Pass 1: y[i] = beta*y[i] + alpha*x[i] (the implicit unit diagonal). */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        alpha_mule(y[i], beta);        /* y[i] *= beta            */
        alpha_madde(y[i], alpha, x[i]); /* y[i] += alpha * x[i]   */
    }

    /* Per-thread dense accumulators, zero-initialized.
     * NOTE(review): alpha_memalign results are not checked for NULL here —
     * presumably the project treats allocation failure as fatal; confirm. */
    ALPHA_Number **y_local = alpha_memalign(num_threads * sizeof(ALPHA_Number *), DEFAULT_ALIGNMENT);
    for(ALPHA_INT i = 0; i < num_threads; i++)
    {
        y_local[i] = alpha_memalign(m * sizeof(ALPHA_Number), DEFAULT_ALIGNMENT);
        memset(y_local[i], '\0', sizeof(ALPHA_Number) * m);
    }

    /* Pass 2: scatter the strictly-lower-triangle contributions.
     * Each stored entry A(i,col) with col < i adds alpha*conj(value)*x[i]
     * to y_local[tid][col]; writes go to the thread's private buffer. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_Number tmp;
        for(ALPHA_INT ai = A->rows_start[i]; ai < A->rows_end[i]; ++ai)
        {
            const ALPHA_INT col = A->col_indx[ai];
            if(col < i) /* strictly lower triangle only; diagonal is implicit 1 */
            {
                alpha_setzero(tmp);
                /* cmp_conj: project macro; presumably stores the complex
                 * conjugate of the matrix value into tmp — TODO confirm. */
                cmp_conj(tmp, A->values[ai]);
                alpha_mul(tmp, alpha, tmp);              /* tmp = alpha * conj(v) */
                alpha_madde(y_local[tid][col], tmp, x[i]); /* y_local += tmp*x[i] */
            }
        }
    }

    /* Pass 3: reduce the per-thread buffers into y (parallel over rows,
     * so each y[row] is written by exactly one thread). */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for(ALPHA_INT row = 0; row < m; row++)
        for(ALPHA_INT i = 0; i < num_threads; i++)
            alpha_adde(y[row], y_local[i][row]);

    /* Release the scratch buffers. */
    for(ALPHA_INT i = 0; i < num_threads; i++)
    {
        alpha_free(y_local[i]);
    }
    alpha_free(y_local);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/* Public entry point (ONAME expands to the exported kernel name):
 * thin dispatch to the OpenMP implementation above. */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_CSR *A,
                           const ALPHA_Number *x,
                           const ALPHA_Number beta,
                           ALPHA_Number *y)
{
    return trmv_x_csr_u_lo_conj_omp(alpha, A, x, beta, y);
}
GrB_Matrix_ncols.c
//------------------------------------------------------------------------------ // GrB_Matrix_ncols: number of columns of a sparse matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB.h" GrB_Info GrB_Matrix_ncols // get the number of columns of a matrix ( GrB_Index *ncols, // matrix has ncols columns const GrB_Matrix A // matrix to query ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_WHERE1 ("GrB_Matrix_ncols (&ncols, A)") ; GB_RETURN_IF_NULL (ncols) ; GB_RETURN_IF_NULL_OR_FAULTY (A) ; //-------------------------------------------------------------------------- // return the number of columns //-------------------------------------------------------------------------- (*ncols) = GB_NCOLS (A) ; #pragma omp flush return (GrB_SUCCESS) ; }
lu.c
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <stdint.h> #include <math.h> #include "nb/math_bot.h" #include "nb/memory_bot.h" #include "nb/container_bot.h" #include "nb/graph_bot.h" #include "nb/solver_bot.h" #include "../sparse_struct.h" #include "cholesky_symbolic.h" int nb_sparse_alloc_LU(const nb_sparse_t *const restrict A, nb_sparse_t** L, nb_sparse_t** U) { *L = nb_sparse_allocate(A->N); *U = nb_sparse_allocate(A->N); nb_sparse_cholesky_symbolic(A, *L, *U, A->N); return 0; } void nb_sparse_decompose_LU(const nb_sparse_t *const Ar, nb_sparse_t *L, nb_sparse_t* U, uint32_t omp_parallel_threads) { /* Create Ut to compute faster the decomposition */ nb_sparse_t* Ut = nb_sparse_clone(L); /* Compute the decomposition */ for (uint32_t j = 0; j < Ar->N; j++) { L->rows_values[j][L->rows_size[j]-1] = 1.0; U->rows_values[j][0] = nb_sparse_get(Ar, j, j); double sum = 0; #pragma omp parallel for schedule(guided) reduction(+:sum) num_threads(omp_parallel_threads) for (uint32_t q = 0; q < L->rows_size[j]-1; q++) sum += L->rows_values[j][q] * Ut->rows_values[j][q]; U->rows_values[j][0] -= sum; Ut->rows_values[j][Ut->rows_size[j]-1] = U->rows_values[j][0]; #pragma omp parallel for schedule(guided) num_threads(omp_parallel_threads) for (uint32_t q = 1; q < U->rows_size[j]; q++) { uint32_t i = U->rows_index[j][q]; /*** L_ij <- A_ij ******************************************************/ uint32_t L_jindex = nb_sparse_bsearch_row(L, i, j, 0, L->rows_size[i]-1);/**/ L->rows_values[i][L_jindex] = nb_sparse_get(Ar, i, j); /**/ /***********************************************************************/ /*** U_ji <- A_ji ******************************************************/ U->rows_values[j][q] = nb_sparse_get(Ar, j, i); /**/ /***********************************************************************/ register uint32_t r = 0; register uint32_t s = 0; register uint32_t _ro = L->rows_index[i][r]; register uint32_t _sigma = L->rows_index[j][s]; bool flag = true; /* 
Flag to know when to stop the cylce */ while (flag) { while (_ro < _sigma) _ro = L->rows_index[i][++r]; while (_ro > _sigma) _sigma = L->rows_index[j][++s]; while (_ro == _sigma) { if (_ro == j) { flag = false; /* Finish the cycle */ break; } double vir = L->rows_values[i][r]; double vjs = Ut->rows_values[j][s]; L->rows_values[i][L_jindex] -= vir*vjs; vjs = L->rows_values[j][s]; vir = Ut->rows_values[i][r]; U->rows_values[j][q] -= vir * vjs; _ro = L->rows_index[i][++r]; _sigma = L->rows_index[j][++s]; } } L->rows_values[i][L_jindex] /= U->rows_values[j][0]; Ut->rows_values[i][L_jindex] = U->rows_values[j][q]; } } /* Free memory */ nb_sparse_destroy(Ut); } void nb_sparse_solve_LU(const nb_sparse_t *const L, const nb_sparse_t *const U, const double *const b, double* _x /* Out */) { double* z = nb_allocate_zero_mem(L->N * sizeof(*z)); nb_sparse_forward_solve(L, b, z); nb_sparse_backward_solve(U, z, _x); nb_free_mem(z); } int nb_sparse_solve_using_LU(const nb_sparse_t *const A, const double *const b, double* x, /* Out */ uint32_t omp_parallel_threads) { nb_sparse_t *L = NULL; nb_sparse_t *U = NULL; nb_sparse_alloc_LU(A, &L, &U); if(NULL == L) return 1; nb_sparse_decompose_LU(A, L, U, omp_parallel_threads); nb_sparse_solve_LU(L, U, b, x); nb_sparse_destroy(L); nb_sparse_destroy(U); return 0; } int nb_sparse_relabel_and_solve_using_LU(const nb_sparse_t *const A, const double *const b, double* x, /* Out */ uint32_t omp_parallel_threads) { uint32_t N = nb_sparse_get_size(A); uint32_t memsize = 2 * N * (sizeof(uint32_t) + sizeof(double)); char *memblock = nb_soft_allocate_mem(memsize); uint32_t *perm = (void*) memblock; uint32_t *iperm = (void*) (memblock + N * sizeof(uint32_t)); double *br = (void*) (memblock + 2 * N * sizeof(uint32_t)); double *xr = (void*) (memblock + 2 * N * sizeof(uint32_t) + N * sizeof(double)); nb_sparse_calculate_permutation(A, perm, iperm); nb_sparse_t *Ar = nb_sparse_create_permutation(A, perm, iperm); nb_vector_permutation(N, b, perm, br); int 
status = nb_sparse_solve_using_LU(Ar, br, xr, omp_parallel_threads); nb_vector_permutation(N, xr, iperm, x); nb_sparse_destroy(Ar); nb_soft_free_mem(memsize, memblock); return status; } double nb_sparse_relabel_and_get_det_sign_using_LU(const nb_sparse_t *A) { uint32_t N = nb_sparse_get_size(A); uint32_t memsize = 2 * N * (sizeof(uint32_t)); char *memblock = nb_soft_allocate_mem(memsize); uint32_t *perm = (void*) memblock; uint32_t *iperm = (void*) (memblock + N * sizeof(uint32_t)); nb_sparse_calculate_permutation(A, perm, iperm); nb_sparse_t *Ar = nb_sparse_create_permutation(A, perm, iperm); nb_sparse_t *Lr = NULL; nb_sparse_t *Ur = NULL; nb_sparse_alloc_LU(Ar, &Lr, &Ur); if(NULL == Lr) return 0; nb_sparse_decompose_LU(Ar, Lr, Ur, 1); double det = nb_sparse_triangular_get_det_sign(Ur); nb_sparse_destroy(Ar); nb_sparse_destroy(Lr); nb_sparse_destroy(Ur); nb_soft_free_mem(memsize, memblock); return det; }
BPMaximalMatching.h
#include "../CombBLAS.h"
#include <mpi.h>
#include <sys/time.h>
#include <iostream>
#include <functional>
#include <algorithm>
#include <vector>
#include <string>
#include <sstream>
#include "Utility.h"

// Matching-initialization strategies selectable by the caller
#define NO_INIT 0
#define GREEDY 1
#define KARP_SIPSER 2
#define DMD 3

using namespace std;

// Fixed-seed RNG so randomized matching runs are reproducible
MTRand GlobalMT(123); // for reproducible result
string matrix_name="";

// Per-vertex payload carried through SpMV: the proposing column vertex
// (parent) and a comparison key used to break ties in the semiring min.
struct VertexType1
{
public:
	VertexType1(int64_t p=-1, int64_t com=0){parent=p; comp = com; };
	friend bool operator<(const VertexType1 & vtx1, const VertexType1 & vtx2 ) { return vtx1.comp<vtx2.comp; };
	friend bool operator==(const VertexType1 & vtx1, const VertexType1 & vtx2 ){return vtx1.comp==vtx2.comp;};
	friend ostream& operator<<(ostream& os, const VertexType1 & vertex ){os << "(" << vertex.parent << "," << vertex.comp << ")"; return os;};
	int64_t parent;
	int64_t comp; // can be index, probability or degree
};

// Semiring (+, 1): multiply always yields 1 so SpMV counts incident
// edges; used to accumulate degree contributions.
template <typename T1, typename T2>
struct SelectPlusSR
{
	static T2 id(){ return 1; };
	static bool returnedSAID() { return false; }
	static MPI_Op mpi_op() { return MPI_SUM; };
	static T2 add(const T2 & arg1, const T2 & arg2)
	{
		return std::plus<T2>()(arg1, arg2);
	}
	static T2 multiply(const T1 & arg1, const T2 & arg2)
	{
		return static_cast<T2> (1); // note: it is not called on a Boolean matrix
	}
	static void axpy(const T1 a, const T2 & x, T2 & y)
	{
		y = add(y, multiply(a, x));
	}
};

// Semiring (min, select2nd): SpMV propagates the vector element (arg2)
// and keeps the minimum — i.e., one BFS step choosing a single proposer.
template <typename T1, typename T2>
struct Select2ndMinSR
{
	static T2 id(){ return T2(); };
	static bool returnedSAID() { return false; }
	static MPI_Op mpi_op() { return MPI_MIN; };
	static T2 add(const T2 & arg1, const T2 & arg2)
	{
		return std::min(arg1, arg2);
	}
	static T2 multiply(const T1 & arg1, const T2 & arg2)
	{
		return arg2;
	}
	static void axpy(const T1 a, const T2 & x, T2 & y)
	{
		y = add(y, multiply(a, x));
	}
};

// Distributed sparse-matrix shorthands (Boolean / int64 / 32-bit-index)
typedef SpParMat < int64_t, bool, SpDCCols<int64_t,bool> > PSpMat_Bool;
typedef SpParMat < int64_t, int64_t, SpDCCols<int64_t,int64_t> > PSpMat_Int64;
typedef SpParMat < int64_t, bool, SpDCCols<int32_t,bool> >
PSpMat_s32p64; void MaximalMatching(PSpMat_s32p64 & A, PSpMat_s32p64 & AT, FullyDistVec<int64_t, int64_t>& mateRow2Col, FullyDistVec<int64_t, int64_t>& mateCol2Row, FullyDistVec<int64_t, int64_t>& degColRecv, int type, bool rand=true) { int nprocs, myrank; MPI_Comm_size(MPI_COMM_WORLD,&nprocs); MPI_Comm_rank(MPI_COMM_WORLD,&myrank); int nthreads = 1; #ifdef _OPENMP #pragma omp parallel { nthreads = omp_get_num_threads(); } #endif FullyDistVec<int64_t, int64_t> degCol = degColRecv; //unmatched row and column vertices FullyDistSpVec<int64_t, int64_t> unmatchedRow(mateRow2Col, [](int64_t mate){return mate==-1;}); FullyDistSpVec<int64_t, int64_t> degColSG(A.getcommgrid(), A.getncol()); //FullyDistVec<int64_t, int64_t> degCol(A.getcommgrid()); //A.Reduce(degCol, Column, plus<int64_t>(), static_cast<int64_t>(0)); // Reduce is not multithreaded FullyDistSpVec<int64_t, VertexType1> unmatchedCol(A.getcommgrid(), A.getncol()); // every veretx is unmatched. keep non-isolated vertices unmatchedCol = EWiseApply<VertexType1>(unmatchedCol, degCol, [](VertexType1 vtx, int64_t deg){return VertexType1();}, [](VertexType1 vtx, int64_t deg){return deg>0;}, true, VertexType1()); FullyDistSpVec<int64_t, VertexType1> fringeRow(A.getcommgrid(), A.getnrow()); FullyDistSpVec<int64_t, int64_t> fringeRow2(A.getcommgrid(), A.getnrow()); FullyDistSpVec<int64_t, VertexType1> deg1Col(A.getcommgrid(), A.getncol()); int64_t curUnmatchedCol = unmatchedCol.getnnz(); int64_t curUnmatchedRow = unmatchedRow.getnnz(); int64_t newlyMatched = 1; // ensure the first pass of the while loop int iteration = 0; double tStart = MPI_Wtime(); vector<vector<double> > timing; #ifdef DETAIL_STATS if(myrank == 0) { cout << "=======================================================\n"; cout << "@@@@@@ Number of processes: " << nprocs << endl; cout << "=======================================================\n"; cout << "It | UMRow | UMCol | newlyMatched | Time "<< endl; cout << 
"=======================================================\n"; } #endif MPI_Barrier(MPI_COMM_WORLD); while(curUnmatchedCol !=0 && curUnmatchedRow!=0 && newlyMatched != 0 ) { unmatchedCol.ApplyInd([](VertexType1 vtx, int64_t idx){return VertexType1(idx,idx);}); if(type==DMD) { unmatchedCol = EWiseApply<VertexType1>(unmatchedCol, degCol, [](VertexType1 vtx, int64_t deg){return VertexType1(vtx.parent,deg);}, [](VertexType1 vtx, int64_t deg){return true;}, false, VertexType1()); } else if(rand) { unmatchedCol.Apply([](VertexType1 vtx){return VertexType1(vtx.parent, static_cast<int64_t>((GlobalMT.rand() * 9999999)+1));}); } // ======================== step1: One step of BFS ========================= vector<double> times; double t1 = MPI_Wtime(); if(type==GREEDY) { SpMV<Select2ndMinSR<bool, VertexType1>>(A, unmatchedCol, fringeRow, false); } else if(type==DMD) { SpMV<Select2ndMinSR<bool, VertexType1>>(A, unmatchedCol, fringeRow, false); } else //(type==KARP_SIPSER) { deg1Col = EWiseApply<VertexType1>(unmatchedCol, degCol, [](VertexType1 vtx, int64_t deg){return vtx;}, [](VertexType1 vtx, int64_t deg){return deg==1;}, false, VertexType1()); if(deg1Col.getnnz()>9) SpMV<Select2ndMinSR<bool, VertexType1>>(A, deg1Col, fringeRow, false); else SpMV<Select2ndMinSR<bool, VertexType1>>(A, unmatchedCol, fringeRow, false); } // Remove matched row vertices fringeRow = EWiseApply<VertexType1>(fringeRow, mateRow2Col, [](VertexType1 vtx, int64_t mate){return vtx;}, [](VertexType1 vtx, int64_t mate){return mate==-1;}, false, VertexType1()); if(myrank == 0){times.push_back(MPI_Wtime()-t1); t1 = MPI_Wtime();} // =========================================================================== // ======================== step2: Update matching ========================= fringeRow2 = EWiseApply<int64_t>(fringeRow, mateRow2Col, [](VertexType1 vtx, int64_t mate){return vtx.parent;}, [](VertexType1 vtx, int64_t mate){return true;}, false, VertexType1()); FullyDistSpVec<int64_t, int64_t> newMatchedCols = 
fringeRow2.Invert(A.getncol()); FullyDistSpVec<int64_t, int64_t> newMatchedRows = newMatchedCols.Invert(A.getnrow()); mateCol2Row.Set(newMatchedCols); mateRow2Col.Set(newMatchedRows); if(myrank == 0){times.push_back(MPI_Wtime()-t1); t1 = MPI_Wtime();} // =========================================================================== // =============== step3: Update degree of unmatched columns ================= unmatchedRow.Select(mateRow2Col, [](int64_t mate){return mate==-1;}); unmatchedCol.Select(mateCol2Row, [](int64_t mate){return mate==-1;}); if(type!=GREEDY) { // update degree newMatchedRows.Apply([](int64_t val){return 1;}); // needed if the matrix is Boolean since the SR::multiply isn't called SpMV< SelectPlusSR<bool, int64_t>>(AT, newMatchedRows, degColSG, false); // degree of column vertices to matched rows // subtract degree of column vertices degCol.EWiseApply(degColSG, [](int64_t old_deg, int64_t new_deg){return old_deg-new_deg;}, [](int64_t old_deg, int64_t new_deg){return true;}, false, static_cast<int64_t>(0)); // remove isolated vertices unmatchedCol = EWiseApply<VertexType1>(unmatchedCol, degCol, [](VertexType1 vtx, int64_t deg){return vtx;}, [](VertexType1 vtx, int64_t deg){return deg>0;}, false, VertexType1()); } if(myrank == 0){times.push_back(MPI_Wtime()-t1); t1 = MPI_Wtime();} // =========================================================================== ++iteration; newlyMatched = newMatchedCols.getnnz(); times.push_back(std::accumulate(times.begin(), times.end(), 0.0)); timing.push_back(times); #ifdef DETAIL_STATS if(myrank == 0) { printf("%3d %10lld %10lld %10lld %18lf\n", iteration , curUnmatchedRow, curUnmatchedCol, newlyMatched, times.back()); } #endif curUnmatchedCol = unmatchedCol.getnnz(); curUnmatchedRow = unmatchedRow.getnnz(); MPI_Barrier(MPI_COMM_WORLD); } int64_t cardinality = mateRow2Col.Count([](int64_t mate){return mate!=-1;}); vector<double> totalTimes(timing[0].size(),0); for(int i=0; i<timing.size(); i++) { for(int j=0; 
j<timing[i].size(); j++) { totalTimes[j] += timing[i][j]; } } if(myrank == 0) { #ifdef DETAIL_STATS cout << "==========================================================\n"; cout << "\n================individual timings =======================\n"; cout << " SpMV Update-Match Update-UMC Total "<< endl; cout << "==========================================================\n"; for(int i=0; i<timing.size(); i++) { for(int j=0; j<timing[i].size(); j++) { printf("%12.5lf ", timing[i][j]); } cout << endl; } cout << "-------------------------------------------------------\n"; for(int i=0; i<totalTimes.size(); i++) printf("%12.5lf ", totalTimes[i]); cout << endl; #endif cout << "matrix nprocesses nthreads ncores algorithm Unmatched-Rows Cardinality Total Time***\n"; cout << matrix_name << " "; cout << nprocs << " " << nthreads << " " << nprocs * nthreads << " "; if(type == DMD) cout << "DMD"; else if(type == GREEDY) cout << "Greedy"; else if(type == KARP_SIPSER) cout << "Karp-Sipser"; if(rand && (type == KARP_SIPSER || type == GREEDY) ) cout << "-rand"; cout << " "; printf("%lld %lld %lf\n", curUnmatchedRow, cardinality, totalTimes.back()); cout << "-------------------------------------------------------\n\n"; } //isMatching(mateCol2Row, mateRow2Col); } template <class IT, class NT> bool isMaximalmatching(PSpMat_Bool & A, FullyDistVec<IT,NT> & mateRow2Col, FullyDistVec<IT,NT> & mateCol2Row) { int myrank; MPI_Comm_rank(MPI_COMM_WORLD,&myrank); FullyDistSpVec<int64_t, int64_t> fringeRow(A.getcommgrid(), A.getnrow()); FullyDistSpVec<int64_t, int64_t> fringeCol(A.getcommgrid(), A.getncol()); FullyDistSpVec<int64_t, int64_t> unmatchedRow(mateRow2Col, [](int64_t mate){return mate==-1;}); FullyDistSpVec<int64_t, int64_t> unmatchedCol(mateCol2Row, [](int64_t mate){return mate==-1;}); unmatchedRow.setNumToInd(); unmatchedCol.setNumToInd(); SpMV<Select2ndMinSR<bool, VertexType1>>(A, unmatchedCol, fringeRow, false); fringeRow = EWiseMult(fringeRow, mateRow2Col, true, (int64_t) -1); 
	if(fringeRow.getnnz() != 0)
	{
		// An unmatched column reached an unmatched row: an edge could
		// still be added, so the matching is not maximal.
		if(myrank == 0)
			cout << "Not maximal matching!!\n";
		return false;
	}
	// Symmetric check from the row side: probe A^T from unmatched rows
	// and verify no unmatched column is reachable.
	PSpMat_Int64 tA = A;
	tA.Transpose();
	SpMV<Select2ndMinSR<bool, VertexType1>>(tA, unmatchedRow, fringeCol, false);
	fringeCol = EWiseMult(fringeCol, mateCol2Row, true, (int64_t) -1);
	if(fringeCol.getnnz() != 0)
	{
		if(myrank == 0)
			cout << "Not maximal matching**!!\n";
		return false;
	}
	return true;
}
/* ===== file: csr_graph.h ===== */
#pragma once #include <cassert> #include <fstream> #include <sstream> #include <fcntl.h> #include <iostream> #include <unistd.h> #include <algorithm> #include <sys/mman.h> #include "common.h" #include "custom_alloc.h" class VertexSet { private: VertexId *ptr; VertexId size_; public: VertexSet() : size_(0) {} VertexSet(VertexId *p, VertexId s) : ptr(p), size_(s) {} VertexId size() { return size_; } const VertexId* begin() const { return ptr; } const VertexId* end() const { return ptr + size_; } VertexId get_intersect_num(const VertexSet &other) const { VertexId num = 0; VertexId idx_l = 0, idx_r = 0; while(idx_l < size_ && idx_r < other.size_) { auto left = ptr[idx_l]; auto right = other.ptr[idx_r]; if(left <= right) idx_l++; if(right <= left) idx_r++; if(left == right) num++; } return num; } }; constexpr bool map_edges = false; // use mmap() instead of read() constexpr bool map_vertices = false; // use mmap() instead of read() struct Edge { VertexId src; VertexId dst; }; class Graph { private: bool directed; bool has_reverse; VertexId n_vertices, *edges, *reverse_edges; uint64_t n_edges, *vertices, *reverse_vertices; VertexId max_degree; std::vector<VertexList> adj_lists; // temporary adj list template<typename T> static void read_bin_file(std::string fname, T *& pointer, size_t elements) { pointer = custom_alloc_global<T>(elements); assert(pointer); std::ifstream inf(fname.c_str(), std::ios::binary); if(!inf.good()) { std::cerr << "Failed to open file: " << fname << "\n"; exit(1); } inf.read(reinterpret_cast<char*>(pointer), sizeof(T) * elements); inf.close(); } inline bool next_line(ifstream &infile, string &line, istringstream &iss, VertexId &src, VertexId &dest) { do { if(!getline(infile, line)) return false; } while(line.length() == 0 || line[0] == '#'); iss.clear(); iss.str(line); return !!(iss >> src >> dest); } void read_mtx_file(std::string fname, bool symmetrize = false, bool need_reverse = false) { std::cout << "Reading (.mtx) input file " << fname << 
"\n"; std::ifstream infile(fname.c_str()); if (!infile) { cout << "File not available\n"; throw 1; } std::string str; getline(infile, str); char c; sscanf(str.c_str(), "%c", &c); // skip header comments while (c == '%') { getline(infile, str); sscanf(str.c_str(), "%c", &c); } // read m, n, nnz int m, n; int64_t nnz; sscanf(str.c_str(), "%d %d %ld", &m, &n, &nnz); if (m != n) { printf("Warning, m(%d) != n(%d)\n", m, n); } //std::cout << "original |V| " << m << " |E| " << nnz << "\n"; n_vertices = m; n_edges = 0; string line; istringstream iss; VertexId edge[2]; //size_t lineNum = 0; adj_lists.resize(m); while (next_line(infile, line, iss, edge[0], edge[1])) { //if (++lineNum % 1000000 == 0) // printf("%lu edges read\n", lineNum); if (edge[0] == edge[1]) continue; // self_loop auto src = edge[0] - 1; auto dst = edge[1] - 1; adj_lists[src].push_back(dst); n_edges ++; if (symmetrize && src != dst) { adj_lists[dst].push_back(src); n_edges ++; } } infile.close(); fill_data(symmetrize, need_reverse, true, true); } void fill_data(bool symmetrize, bool need_reverse, bool sorted, bool remove_redundents) { //sort the neighbor list if (sorted) { //printf("Sorting the neighbor lists..."); for(int i = 0; i < n_vertices; i++) std::sort(adj_lists[i].begin(), adj_lists[i].end()); //printf(" Done\n"); } // remove redundent int num_redundents = 0; if(remove_redundents) { printf("Removing redundent edges..."); for (int i = 0; i < n_vertices; i++) { for (unsigned j = 1; j < adj_lists[i].size(); j ++) { if (adj_lists[i][j] == adj_lists[i][j-1]) { adj_lists[i].erase(adj_lists[i].begin()+j); num_redundents ++; n_edges --; j --; } } } printf(" %d redundent edges are removed\n", num_redundents); } std::cout << "|V| " << n_vertices << " |E| " << n_edges << "\n"; vertices = custom_alloc_global<uint64_t>(n_vertices+1); vertices[0] = 0; max_degree = 0; for (int i = 1; i < n_vertices+1; i++) { auto degree = adj_lists[i-1].size(); if (VertexId(degree) > max_degree) max_degree = VertexId(degree); 
vertices[i] = vertices[i-1] + degree; } edges = custom_alloc_global<VertexId>(n_edges); //#pragma omp parallel for for (VertexId i = 0; i < n_vertices; i++) { auto begin = vertices[i]; std::copy(adj_lists[i].begin(), adj_lists[i].end(), &edges[begin]); } // generate the reverse (transposed) graph for directed graph if (!symmetrize && need_reverse) { build_reverse_graph(); } for (VertexId i = 0; i < n_vertices; i++) adj_lists[i].clear(); adj_lists.clear(); } void build_reverse_graph() { std::vector<VertexList> reverse_adj_lists(n_vertices); for (VertexId v = 0; v < n_vertices; v++) { //for (auto u : adj_lists[v]) { for (auto u : N(v)) { reverse_adj_lists[u].push_back(v); } } reverse_vertices = custom_alloc_global<uint64_t>(n_vertices+1); reverse_vertices[0] = 0; for (VertexId i = 1; i < n_vertices+1; i++) { auto degree = reverse_adj_lists[i-1].size(); reverse_vertices[i] = reverse_vertices[i-1] + degree; } reverse_edges = custom_alloc_global<VertexId>(n_edges); //#pragma omp parallel for for (VertexId i = 0; i < n_vertices; i++) { auto begin = reverse_vertices[i]; std::copy(reverse_adj_lists[i].begin(), reverse_adj_lists[i].end(), &reverse_edges[begin]); } for (VertexId i = 0; i < n_vertices; i++) reverse_adj_lists[i].clear(); reverse_adj_lists.clear(); } template<typename T> static void map_file(std::string fname, T *& pointer, size_t elements) { int inf = open(fname.c_str(), O_RDONLY, 0); if(-1 == inf) { std::cerr << "Failed to open file: " << fname << "\n"; exit(1); } pointer = (T*)mmap(nullptr, sizeof(T) * elements, PROT_READ, MAP_SHARED, inf, 0); assert(pointer != MAP_FAILED); close(inf); } //std::vector<uint64_t> scale_accesses; public: Graph(std::string prefix, std::string filetype = "bin", bool symmetrize = false, bool need_reverse = false) { if (filetype == "mtx") { std::string filename = prefix + ".mtx"; read_mtx_file(filename, symmetrize, need_reverse); } else if (filetype == "bin") { std::ifstream f_meta((prefix + ".meta.txt").c_str()); assert(f_meta); 
max_degree = 0; int vid_size; f_meta >> n_vertices >> n_edges >> vid_size >> max_degree; std::cout << "|V| " << n_vertices << " |E| " << n_edges << "\n"; assert(sizeof(VertexId) == vid_size); f_meta.close(); if(map_vertices) map_file(prefix + ".vertex.bin", vertices, n_vertices+1); else read_bin_file(prefix + ".vertex.bin", vertices, n_vertices+1); if(map_edges) map_file(prefix + ".edge.bin", edges, n_edges); else read_bin_file(prefix + ".edge.bin", edges, n_edges); if (!symmetrize && need_reverse) build_reverse_graph(); } directed = false; has_reverse = false; if (!symmetrize && need_reverse) { directed = true; printf("This graph maintains both incomming and outgoing edge-list\n"); has_reverse = true; } if (symmetrize) { printf("This graph is symmetrized\n"); reverse_vertices = vertices; reverse_edges = edges; has_reverse = true; } //std::cout << "max_degree: " << max_degree << "\n"; if (max_degree == 0 || max_degree>=n_vertices) exit(1); //if (use_dag) orientation(); } ~Graph() { if(map_edges) { munmap(edges, n_edges*sizeof(VertexId)); } else { custom_free(edges, n_edges); } if(map_vertices) { munmap(vertices, (n_vertices+1)*sizeof(uint64_t)); } else { custom_free(vertices, n_vertices+1); } } Graph(const Graph &)=delete; Graph& operator=(const Graph &)=delete; VertexSet N(VertexId vid) const { assert(vid >= 0); assert(vid < n_vertices); uint64_t begin = vertices[vid], end = vertices[vid+1]; assert(begin >= 0); if(begin > end) { fprintf(stderr, "vertex %u bounds error: [%lu, %lu)\n", vid, begin, end); exit(1); } assert(end <= n_edges); return VertexSet(edges + begin, end - begin); } VertexSet out_neigh(VertexId vid, VertexId start_offset = 0) const { auto begin = vertices[vid]; auto end = vertices[vid+1]; auto r = std::min(start_offset, VertexId(end - begin)); begin += r; return VertexSet(edges + begin, end - begin); } VertexSet in_neigh(VertexId vid) const { auto begin = reverse_vertices[vid]; auto end = reverse_vertices[vid+1]; return VertexSet(reverse_edges + 
begin, end - begin); } VertexId V() { return n_vertices; } size_t E() { return n_edges; } size_t size() { return size_t(n_vertices); } size_t sizeEdges() { return n_edges; } VertexId num_vertices() { return n_vertices; } size_t num_edges() { return n_edges; } VertexId get_degree(VertexId v) { return vertices[v+1] - vertices[v]; } VertexId out_degree(VertexId v) { return vertices[v+1] - vertices[v]; } uint64_t edge_begin(VertexId v) { return vertices[v]; } uint64_t edge_end(VertexId v) { return vertices[v+1]; } VertexId getEdgeDst(uint64_t e) { return edges[e]; } VertexId get_max_degree() { return max_degree; } bool is_directed() { return directed; } bool has_reverse_graph() { return has_reverse; } uint64_t* out_rowptr() { return vertices; } VertexId* out_colidx() { return edges; } uint64_t* in_rowptr() { return reverse_vertices; } VertexId* in_colidx() { return reverse_edges; } void orientation() { std::cout << "Orientation enabled, using DAG\n"; std::vector<VertexId> degrees(n_vertices, 0); for (VertexId v = 0; v < n_vertices; v++) { degrees[v] = get_degree(v); } std::vector<VertexId> new_degrees(n_vertices, 0); for (VertexId src = 0; src < n_vertices; src ++) { for (auto dst : N(src)) { if (degrees[dst] > degrees[src] || (degrees[dst] == degrees[src] && dst > src)) { new_degrees[src]++; } } } uint64_t *old_vertices = vertices; VertexId *old_edges = edges; uint64_t *new_vertices = custom_alloc_global<uint64_t>(n_vertices+1); new_vertices[0] = 0; for (VertexId v = 1; v < n_vertices+1; v++) { new_vertices[v] = new_vertices[v-1] + new_degrees[v-1]; } //ParallelPrefixSum(new_degrees, new_vertices); auto num_edges = new_vertices[n_vertices]; VertexId *new_edges = custom_alloc_global<VertexId>(num_edges); for (VertexId src = 0; src < n_vertices; src ++) { auto begin = new_vertices[src]; unsigned offset = 0; for (auto dst : N(src)) { if (degrees[dst] > degrees[src] || (degrees[dst] == degrees[src] && dst > src)) { new_edges[begin+offset] = dst; offset ++; } } } vertices 
	= new_vertices;
		edges = new_edges;
		// Release the old (undirected) CSR arrays now that the DAG
		// orientation has been installed.
		custom_free<uint64_t>(old_vertices, n_vertices);
		custom_free<VertexId>(old_edges, n_edges);
		n_edges = num_edges;
		std::cout << "|V| " << n_vertices << " |E| " << n_edges << "\n";
	}
};

// Convenience wrapper: size of the intersection of two sorted
// adjacency lists (common-neighbor count, e.g. for triangle counting).
inline uint64_t intersection_num(const VertexSet& a, const VertexSet& b) {
	return a.get_intersect_num(b);
}
/* ===== file: GB_binop__pair_int64.c (auto-generated; do not edit) ===== */
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pair_int64) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pair_int64) // C+=b function (dense accum): GB (_Cdense_accumb__pair_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_int64) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: int64_t // A type: int64_t // A pattern? 1 // B type: int64_t // B pattern? 
1 // BinaryOp: cij = 1 #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ ; // true if values of A are not used #define GB_A_IS_PATTERN \ 1 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ ; // true if values of B are not used #define GB_B_IS_PATTERN \ 1 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = 1 ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PAIR || GxB_NO_INT64 || GxB_NO_PAIR_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__pair_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pair_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pair_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict 
Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pair_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int64_t alpha_scalar ; int64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int64_t *) alpha_scalar_in)) ; beta_scalar = (*((int64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const int C_sparsity, const int 
ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
sum_int.c
//sum.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>

#define N_RUNS 20
#define N 10240000

/* Wall-clock timer in seconds (ftime gives millisecond resolution). */
double read_timer() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time + (double) tm.millitm / 1000.0;
}

//Create a matrix and a vector and fill with random numbers
void init(int *X) {
    for (int i = 0; i<N; i++) {
        X[i] = (int)rand();
    }
}

/* Vectorized sum of the N elements of X.
 * NOTE(review): with random inputs the int accumulator can overflow,
 * which is undefined behavior for signed int.  The serial reference
 * overflows the same way so the comparison still matches in practice,
 * but a long long accumulator would be strictly correct — confirm
 * intent before changing, since it alters the printed result. */
int sum(int *X) {
    int result = 0;
#pragma omp simd reduction(+:result)
    for (int i = 0; i<N; i++) {
        result += X[i];
    }
    return result;
}

// Debug functions
/* Scalar reference implementation used to validate sum(). */
int sum_serial(int *X) {
    int result = 0;
    for (int i = 0; i<N; i++) {
        result += X[i];
    }
    return result;
}

/* Print the first 8 elements of the vector. */
void print_vector(int *vector) {
    printf("[");
    for (int i = 0; i<8; i++) {
        printf("%d ", vector[i]);
    }
    puts("]");
}

int main(int argc, char **argv) {
    //Set everything up
    int *X = malloc(sizeof(int)*N);
    /* FIX: check the 40 MB allocation before touching it. */
    if (X == NULL) {
        fprintf(stderr, "allocation failed\n");
        return 1;
    }
    int result, result_serial;
    srand(time(NULL));
    init(X);

    //warming up
    result = sum(X);
    result_serial = sum_serial(X);

    double t = 0;
    double start = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        result = sum(X);
    t += (read_timer() - start);

    double t_serial = 0;
    double start_serial = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        result_serial = sum_serial(X);
    t_serial += (read_timer() - start_serial);

    print_vector(X);
    puts("=\n");
    printf("SIMD: %d\n", result);
    puts("---------------------------------");
    printf("Serial: %d\n", result_serial);

    /* FIX: a sum performs ~N additions per run, so the flop count is
     * N * N_RUNS.  The previous formula ((2.0 * N) * N * N_RUNS) was
     * the matrix-multiply flop count and overstated GFLOPS by a
     * factor of ~2*N. */
    double gflops = (1.0 * N * N_RUNS) / (1.0e9 * t);
    double gflops_serial = (1.0 * N * N_RUNS) / (1.0e9 * t_serial);
    printf("==================================================================\n");
    printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------\n");
    printf("Sum (SIMD):\t\t%4f\t%4f\n", t/N_RUNS, gflops);
    printf("Sum (Serial):\t\t%4f\t%4f\n", t_serial/N_RUNS, gflops_serial);
    printf("Correctness check: %d\n", result_serial - result);
    free(X);
    return 0;
}
test35.c
#include<stdio.h> int foo () { printf("Start Foo.\n"); printf("End Foo.\n"); return 0; } int main() { int a; #pragma omp parallel { if (!omp_get_thread_num()) { #pragma omp critical { printf("Hello World\n"); sleep(3); printf("Why are you so weird, world?\n", a); } } else { int i; for (i = 0; i < 3; i++) { #pragma omp atomic update a = a + foo(); } } } printf("%d", a); }
counting.c
#include <stdio.h> #include <stdlib.h> #include <limits.h> #include <omp.h> #include <math.h> #define min(a,b) (((a)<(b))?(a):(b)) #define MIN(a,b) (((a)<(b))?(a):(b)) #define max(a,b) (((a)>(b))?(a):(b)) #define MAX(a,b) (((a)>(b))?(a):(b)) #define floord(n,d) floor(((double)(n))/((double)(d))) #define ceild(n,d) ceil(((double)(n))/((double)(d))) int ** c; int ** ck; int zz = 2; int ** F; //only ACGU int N; int DIM; char * RNA; //only ACGU #include "mem.h" int paired(int i, int j) { char nt1 = RNA[i]; char nt2 = RNA[j]; if ((nt1 == 'A' && nt2 == 'U') || (nt1 == 'U' && nt2 == 'A') || (nt1 == 'G' && nt2 == 'C') || (nt1 == 'C' && nt2 == 'G') || (nt1 == 'G' && nt2 == 'U') || (nt1 == 'U' && nt2 == 'G')){ return 1;} else return 0; } int main(int argc, char *argv[]){ int num_proc=1; int i,j,k,ll,p,q,l=0; int c0, c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12; int t1, t2, t3, t4, t5, t6,t7,t8,t9,t10; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; srand(time(NULL)); if(argc > 1) num_proc = atoi(argv[1]); int kind=1; N = 8; DIM = 12; if(argc > 2) N = atoi(argv[2]); DIM = N+10; if(argc > 3) kind = atoi(argv[3]); omp_set_num_threads(num_proc); //printf(" -exp(Ebp/RT) = %5.3f\n", ERT); F = mem(); c = mem(); ck = mem(); for(i=0; i<DIM; i++) for(i=0; i<DIM; i++){ c[i][j] = i+j; ck[i][j] = i+j; } RNA = (char*) malloc(DIM * sizeof(char*)); //read from FASTA file rand_seq(RNA, N); for(i=0; i<DIM; i++) printf("%c", RNA[i]); printf("\n"); int check=1; double start = omp_get_wtime(); // compute the partition functions Q and Qbp if(kind==1 || check){ #pragma scop for (i = N-2; i>=1; i--){ for ( j=i+2; j<= N; j++){ for ( k = i; k<=j-l; k++){ ck[i][j] += ck[i][j-1] + paired(k,j) ? 
ck[i][k-1] + ck[k+1][j-1] : 0; } } } #pragma endscop } if(kind==2) // pluto { /* Start of CLooG code */ /* Start of CLooG code */ if ((N >= 3) && (N >= l+1)) { for (t1=max(3,l+1);t1<=N;t1++) { lbp=0; ubp=min(floord(t1-2,16),floord(t1-l,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5) for (t2=lbp;t2<=ubp;t2++) { for (t3=t2;t3<=floord(t1-l,16);t3++) { for (t4=max(1,16*t2);t4<=min(min(t1-2,t1-l),16*t2+15);t4++) { for (t5=max(16*t3,t4);t5<=min(t1-l,16*t3+15);t5++) { c[t4][t1] += c[t4][t1-1] + paired(t5,t1) ? c[t4][t5-1] + c[t5+1][t1-1] : 0;; } } } } } } /* End of CLooG code */ /* End of CLooG code */ } if(kind==3) // traco { for( c1 = max(0, floord(l - 2, 8) - 1); c1 <= floord(N - 3, 8); c1 += 1) #pragma omp parallel for shared(c1) private(c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12) schedule(dynamic, 1) for( c3 = max(max(0, floord(l - 2, 16)), c1 - (N + 13) / 16 + 1); c3 <= c1 / 2; c3 += 1) for( c5 = 0; c5 <= min(c3 + floord(-l + 1, 16) + 1, floord(-l + N - 1, 16)); c5 += 1) for( c7 = max(-N + 16 * c1 - 16 * c3 + 2, l - N + 16 * c5); c7 <= min(-1, -N + 16 * c1 - 16 * c3 + 17); c7 += 1) { for( c9 = max(l + 16 * c5 - c7, 16 * c3 - c7 + 2); c9 <= min(min(N, l + 16 * c5 - c7 + 16), 16 * c3 - c7 + 17); c9 += 1) { if (c7 + c9 >= 16 * c3 + 3 && c7 + c9 >= l + 16 * c5 + 1) for( c11 = -c7; c11 < 16 * c5 - c7; c11 += 1) c[(-c7)][c9] += c[(-c7)][c9-1] + paired(c11,c9) ? c[(-c7)][c11-1] + c[c11+1][c9-1] : 0; for( c11 = 16 * c5 - c7; c11 <= min(16 * c5 - c7 + 15, -l + c9); c11 += 1) c[(-c7)][c9] += c[(-c7)][c9-1] + paired(c11,c9) ? c[(-c7)][c11-1] + c[c11+1][c9-1] : 0; } if (16 * c3 >= l + 16 * c5 + 15) for( c11 = 16 * c5 - c7; c11 <= 16 * c5 - c7 + 15; c11 += 1) c[(-c7)][(16*c3-c7+2)] += c[(-c7)][(16*c3-c7+2)-1] + paired(c11,(16*c3-c7+2)) ? 
c[(-c7)][c11-1] + c[c11+1][(16*c3-c7+2)-1] : 0; } } if(kind==4) // traco { for( c0 = max(0, floord(l - 2, 8) - 1); c0 <= floord(N - 3, 8); c0 += 1) #pragma omp parallel for shared(c0) private(c1, c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12) schedule(dynamic, 1) for( c1 = (c0 + 1) / 2; c1 <= min(min(c0, c0 + (-l + 1/16) + 1), (N - 3) / 16); c1 += 1) for( c3 = max(l, 16 * c0 - 16 * c1 + 2); c3 <= min(N - 1, 16 * c0 - 16 * c1 + 17); c3 += 1) for( c4 = max(0, -c1 + (N - 1) / 16 - 1); c4 <= min((-l + N) / 16, -c1 + (-l + N + c3 - 2) / 16); c4 += 1) for( c6 = max(max(-N + 16 * c1 + 2, -N + c3), -16 * c4 - 15); c6 <= min(min(-1, -N + 16 * c1 + 17), -l + c3 - 16 * c4); c6 += 1) for( c10 = max(16 * c4, -c6); c10 <= min(16 * c4 + 15, -l + c3 - c6); c10 += 1) c[(-c6)][(c3-c6)] += c[(-c6)][(c3-c6)-1] + paired(c10,(c3-c6)) ? c[(-c6)][c10-1] + c[c10+1][(c3-c6)-1] : 0; if(1==0) for( c0 = max(0, l + floord(l - 2, 16) - 2); c0 < N + floord(N - 3, 16) - 2; c0 += 1) #pragma omp parallel for shared(c0) private(c1, c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12) schedule(dynamic, 1) for( c1 = c0 - (c0 + 17) / 17 + 1; c1 <= min(min(N - 3, c0), c0 + (-l + 1)/16 + 1); c1 += 1) for( c3 = max(l, 16 * c0 - 16 * c1 + 2); c3 <= min(c1 + 2, 16 * c0 - 16 * c1 + 17); c3 += 1) for( c4 = (N - c1 - 2) / 16; c4 <= (-l + N - c1 + c3 - 2) / 16; c4 += 1) for( c10 = max(N - c1 - 2, 16 * c4); c10 <= min(-l + N - c1 + c3 - 2, 16 * c4 + 15); c10 += 1) c[(N-c1-2)][(N-c1+c3-2)] += c[(N-c1-2)][(N-c1+c3-2)-1] + paired(c10,(N-c1+c3-2)) ? c[(N-c1-2)][c10-1] + c[c10+1][(N-c1+c3-2)-1] : 0; } double stop = omp_get_wtime(); printf("%.4f\n",stop - start); //printf("Q\n"); //rna_array_print(Q); //printf("Qbp\n"); //rna_array_print(Qbp); for(i=0; i<DIM; i++) for(j=0; j<DIM; j++) if(c[i][j] != ck[i][j]){ printf("err: %d %d %d %d\n", i, j,c[i][j], ck[i][j]); exit(0); } return 0; }
7434.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "3mm.h" /* Array initialization. */ static void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nk; j++) A[i][j] = ((DATA_TYPE) i*j) / ni; for (i = 0; i < nk; i++) for (j = 0; j < nj; j++) B[i][j] = ((DATA_TYPE) i*(j+1)) / nj; for (i = 0; i < nj; i++) for (j = 0; j < nm; j++) C[i][j] = ((DATA_TYPE) i*(j+3)) / nl; for (i = 0; i < nm; i++) for (j = 0; j < nl; j++) D[i][j] = ((DATA_TYPE) i*(j+2)) / nk; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nl, DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nl; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl), DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j, k; #pragma scop #pragma omp parallel private (i, j, k) num_threads(#P11) { /* E := A*B */ #pragma omp parallel for schedule(static, 28) simd for (i = 0; i < _PB_NI; i++) { for (j = 0; j < _PB_NJ; j++) { E[i][j] = 0; for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } } /* F := C*D */ #pragma omp parallel for schedule(static, 28) simd for (i = 0; i < _PB_NJ; i++) { for (j = 0; j < _PB_NL; j++) { F[i][j] = 0; for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } } /* G := E*F */ #pragma omp parallel for schedule(static, 28) simd for (i = 0; i < _PB_NI; i++) { for (j = 0; j < _PB_NL; j++) { G[i][j] = 0; for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. */ polybench_start_instruments; /* Run kernel. 
*/ kernel_3mm (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G))); /* Be clean. */ POLYBENCH_FREE_ARRAY(E); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(F); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); POLYBENCH_FREE_ARRAY(G); return 0; }
pr58551.c
/* PR middle-end/58551 */ /* { dg-do compile } */ /* { dg-options "-O0 -fopenmp" } */ void foo (int *a) { int i; for (i = 0; i < 8; i++) #pragma omp task if (a[i]) __builtin_abort (); } void bar (int, int); void baz (int *a) { int i; for (i = 0; i < 8; i++) #pragma omp task if (a[i]) { int j, k; for (j = 0; j < 10; j++) for (k = 0; k < 8; k++) bar (j, k); for (k = 0; k < 12; k++) bar (-1, k); __builtin_abort (); } }
dfa8.c
#include <stdio.h> #include <stdint.h> #include <string.h> #include "dfa.h" #ifdef _OPENMP #include "omp.h" #endif void r8_get_diffMC(const int col8, const word32 diff_col, const int col9, word32 diffMC_list[DIFF_MC_MAX], int *len) { int i, c1, c2, diff; int fault_list[255]; int fault_list_len = 0; int row9 = -1; /* if fault position is known */ if (col8 != -1) { row9 = (col8 + 3*col9) % 4; } /* case 1: fault in round 8 is known */ if (diff_col != 0) { diff = (int)TAKEBYTE(diff_col, row9); for (c1 = 1; c1 < 255; c1++) { c2 = diff ^ c1; if (c1 > c2) { continue; } /* fault_list_len = 127 */ fault_list[fault_list_len++] = (int)(sbox[c1] ^ sbox[c2]); } } /* case 2: unknown fault */ else { for (i = 0; i < 255; i++) { fault_list[i] = i + 1; } fault_list_len = 255; } /* construct the MC differences table */ *len = get_diff_MC(row9, fault_list, fault_list_len, diffMC_list); } void r8_find_candidates(const byte ct[16], const byte fct[16], const int row8, const int col8, const int fault, word32 candidates[4][CAND_MAX], int cand_len[4]) { byte tmp[4] = {0, 0, 0, 0}; int col9, len; word32 diffMC_list[DIFF_MC_MAX]; word32 diff_col = 0; /* fault position and value known */ if (row8 != -1 && col8 != -1 && fault != -1) { tmp[row8] = fault; mixColumn(tmp); diff_col = bytes_to_word(tmp); } for (col9 = 0; col9 < 4; col9++) { r8_get_diffMC(col8, diff_col, col9, diffMC_list, &len); cand_len[col9] = k10_cand_from_diffMC(ct, fct, col9, diffMC_list, len, candidates[col9]); } } int r8_exhaustive_search(const byte pt0[16], const byte ct0[16], const byte ct[16], const byte fct[16], const int row8, const int col8, const int fault, const word32 candidates[4][CAND_MAX], const int cand_len[4], byte masterkey[16]) { int i, j, k, l, ii; int found = 0; byte subkey10[16]; byte subkey9[16]; byte subkeys[176]; byte cttmp[16]; byte fcttmp[16]; byte ctcmp[16]; byte diff[4]; #ifdef _OPENMP #pragma omp parallel for private(subkey10,subkey9,cttmp,fcttmp,j,k,l,ii,diff) shared(found) #endif for (i = 0; i < 
cand_len[0]; i++) { if (found) { continue; } fprintf(stderr, "Progress: %d/%d\n", i + 1, cand_len[0]); subkey10[0] = TAKEBYTE(candidates[0][i], 0); subkey10[13] = TAKEBYTE(candidates[0][i], 1); subkey10[10] = TAKEBYTE(candidates[0][i], 2); subkey10[7] = TAKEBYTE(candidates[0][i], 3); for (j = 0; j < cand_len[1]; j++) { subkey10[4] = TAKEBYTE(candidates[1][j], 0); subkey10[1] = TAKEBYTE(candidates[1][j], 1); subkey10[14] = TAKEBYTE(candidates[1][j], 2); subkey10[11] = TAKEBYTE(candidates[1][j], 3); for (k = 0; k < cand_len[2]; k++) { subkey10[8] = TAKEBYTE(candidates[2][k], 0); subkey10[5] = TAKEBYTE(candidates[2][k], 1); subkey10[2] = TAKEBYTE(candidates[2][k], 2); subkey10[15] = TAKEBYTE(candidates[2][k], 3); for (l = 0; l < cand_len[3]; l++) { subkey10[12] = TAKEBYTE(candidates[3][l], 0); subkey10[9] = TAKEBYTE(candidates[3][l], 1); subkey10[6] = TAKEBYTE(candidates[3][l], 2); subkey10[3] = TAKEBYTE(candidates[3][l], 3); k9_from_k10(subkey10, subkey9); for (ii = 0; ii < 16; ii++) { cttmp[ii] = ct[ii] ^ subkey10[ii]; fcttmp[ii] = fct[ii] ^ subkey10[ii]; } invShiftRows(cttmp); invSubBytes(cttmp); invShiftRows(fcttmp); invSubBytes(fcttmp); for (ii = 0; ii < 16; ii++) { cttmp[ii] ^= subkey9[ii]; fcttmp[ii] ^= subkey9[ii]; } invMixColumns(cttmp); invMixColumns(fcttmp); for (ii = 0; ii < 4; ii++) { diff[ii] = invsbox[cttmp[POSITIONS[col8][ii]]] ^ invsbox[fcttmp[POSITIONS[col8][ii]]]; } invMixColumn(diff); /* tests to validate if key candidate is consistent with the fault */ if (diff[0] != 0) { if (diff[1] != 0 || diff[2] != 0 || diff[3] != 0 || row8 > 0 || (fault != -1 && fault != diff[0])) { continue; } } else if (diff[1] != 0) { if (diff[2] != 0 || diff[3] != 0 || (row8 != -1 && row8 != 1) || (fault != -1 && fault != diff[1])) { continue; } } else if (diff[2] != 0) { if (diff[3] != 0 || (row8 != -1 && row8 != 2) || (fault != -1 && fault != diff[2])) { continue; } } else { if ((row8 != -1 && row8 != 3) || (fault != -1 && fault != diff[3])) { continue; } } /* one final 
test if previous tests passed */ reverseKeyExpansion(subkey10, subkeys); encrypt_aes(pt0, ctcmp, subkeys, AES_ROUNDS_128); if (memcmp(ct0, ctcmp, 16) == 0) { memcpy(masterkey, subkeys, 16); found = 1; } } /* end for l */ } /* end for k */ } /* end for j */ } /* end for i */ return found; } /* * return value * 0: not found * 1: found * -1: error (nb_cand = 0) */ int r8_key_recovery_single_ct(const byte pt0[16], const byte ct0[16], const byte ct[16], const byte fct[16], const int fault_pos, const int fault, byte masterkey[16]) { int i, col8; int cand_len[4]; int found = -1; int row8 = -1; int col8_start = 0; int col8_end = 4; long int nb_cand; word32 candidates[4][CAND_MAX]; if (fault_pos >= 0 && fault_pos < 16) { row8 = fault_pos % 4; col8_start = fault_pos / 4; col8_end = col8_start + 1; } for (col8 = col8_start; col8 < col8_end; col8++) { r8_find_candidates(ct, fct, row8, col8, fault, candidates, cand_len); nb_cand = 1; for (i = 0; i < 4; i++) { nb_cand *= (long int)cand_len[i]; } printf("Hypothesis: fault in column %d\n" "Number of candidates for positions 0, 13, 10, 7: %d\n" " 4, 1, 14, 11: %d\n" " 8, 5, 2, 15: %d\n" " 12, 9, 6, 3: %d\n" "Number of Master Key candidates: %ld\n", col8, cand_len[0], cand_len[1], cand_len[2], cand_len[3], nb_cand); if (nb_cand > 0) { found = r8_exhaustive_search(pt0, ct0, ct, fct, row8, col8, fault, candidates, cand_len, masterkey); } if (found == 1) { break; } } return found; } /* * In case of several (ct,fct) pairs, we only do intersections * of candidates followed by exhaustive search * return value: * 0: not found * 1: found * -1: error (nb_cand = 0) */ int r8_key_recovery(const byte pt0[16], const byte ct0[16], const byte ct_list[][16], const byte fct_list[][16], const int fault_pos_list[PAIRS_MAX], const int fault_list[PAIRS_MAX], const int fct_len, byte masterkey[16]) { int i, j; int cand_tmp_len[4], cand_len[4]; int found = -1; int row8 = -1; int col8 = -1; long int nb_cand; word32 candidates[4][CAND_MAX]; word32 
cand_tmp[4][CAND_MAX]; /* we first get candidates for a faulty ciphertext */ if (fault_pos_list[0] >= 0 && fault_pos_list[0] < 16) { row8 = fault_pos_list[0] % 4; col8 = fault_pos_list[0] / 4; } r8_find_candidates(ct_list[0], fct_list[0], row8, col8, fault_list[0], candidates, cand_len); /* we reduce the candidates with other pairs (ct,fct) */ for (i = 1; i < fct_len; i++) { row8 = -1; col8 = -1; if (fault_pos_list[i] >= 0 && fault_pos_list[i] < 16) { row8 = fault_pos_list[i] % 4; col8 = fault_pos_list[i] / 4; } r8_find_candidates(ct_list[i], fct_list[i], row8, col8, fault_list[i], cand_tmp, cand_tmp_len); /* intersection with previous candidates */ for (j = 0; j < 4; j++) { intersection(candidates[j], &cand_len[j], cand_tmp[j], cand_tmp_len[j]); } } nb_cand = 1; for (i = 0; i < 4; i++) { nb_cand *= (long int)cand_len[i]; } printf("Number of candidates for positions 0, 13, 10, 7: %d\n" " 4, 1, 14, 11: %d\n" " 8, 5, 2, 15: %d\n" " 12, 9, 6, 3: %d\n" "Number of Master Key candidates: %ld\n", cand_len[0], cand_len[1], cand_len[2], cand_len[3], nb_cand); if (nb_cand > 0) { found = exhaustive_search(pt0, ct0, candidates, cand_len, masterkey); } return found; }
alt_veclib.c
#include <stdio.h> #include <omp.h> int main(void) { int isHost = 1; #pragma omp target map(tofrom: isHost) { isHost = omp_is_initial_device(); printf("Hello world. %d\n", 100); for (int i =0; i<5; i++) { printf("Hello world. iteration %d\n", i); } } printf("Target region executed on the %s\n", isHost ? "host" : "device"); return isHost; }
test83.c
struct { int a;} b; int foo(int a, int b) { return a; } int bar(int a, int b) { return b; } void pr(char * str) {} int main() { int y = 10; int i[4]; int a = 10; int (*fptr[4])(int, int); int p[4]; p[3] = 0; fptr[3] = &foo; pr("Below"); i[3] = fptr[3](a * 10, bar(2, p[3])); #pragma omp parallel { } }
matrix_mul_openmp.c
#include <stdio.h> #include <math.h> #include <stdlib.h> #include <omp.h> /* Simple matrix multiplication example. */ /* matrix multiplication */ void matrix_mult(int const N, int const M, int const K, double A[N][M], double B[M][K], double C[N][K]) { for(int i = 0; i < N; i++) { for(int j = 0; j < K; j++) { C[i][j] = 0; } } #pragma loop1 #pragma omp parallel for default(shared) firstprivate(N, K, M, A, B) for(int i = 0; i < N; i++) { for(int j = 0; j < K; j++) { for(int l = 0; l < M; l++) { C[i][j] += A[i][l] * B[l][j]; } } } } /* * Set an N by M matrix A to random values */ void init_matrix(int const N, int const M, double A[N][M]) { for(int i = 0; i < N; ++i) { for(int j = 0; j < M; ++j) { A[i][j] = ((double) rand()) / (double) 2147483647; } } } void print_matrix_result(int const N, int const K, double A[N][K]) { double acc = 0.0; for(int i = 0; i < N; ++i) { for(int j = 0; j < K; ++j) { acc += A[i][j]; } } printf("Result acc: %f\n", acc); } void test_matrix_mul() { /* int N=2048; int M=1024; int K=2048; */ int N = 1024; int M = 1024; int K = 512; // allocate matrices double (*A)[M] = malloc(sizeof(double[N][M])); double (*B)[K] = malloc(sizeof(double[M][K])); double (*C)[K] = malloc(sizeof(double[N][K])); // initialize matrices init_matrix(N, M, A); init_matrix(M, K, B); // do: C = A*B matrix_mult(N, M, K, A, B, C); print_matrix_result(N, K, C); // free free(C); free(B); free(A); } int main() { // To make results repeatable srand(0); test_matrix_mul(); }
par_mgr.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Two-grid system solver
 *
 *****************************************************************************/

#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
#include "par_mgr.h"

#ifdef HYPRE_USING_DSUPERLU
#include "dsuperlu.h"
#endif

#if defined(HYPRE_USING_CUDA)
/* Abort with a message when an MGR option has no GPU implementation.
 * 'option' is interpolated into the error text. */
void hypre_NoGPUSupport(char *option)
{
   char msg[256];
   hypre_sprintf(msg, "Error: Chosen %s option is not currently supported on GPU\n\n", option);
   hypre_printf("%s ", msg);
   //  hypre_error_w_msg(1, msg);
   /* Hard abort: continuing with an unsupported option would silently
      produce wrong results on the device. */
   hypre_MPI_Abort(hypre_MPI_COMM_WORLD, -1);
}
#endif

/* Create */
/* Allocate a hypre_ParMGRData and initialize every field to its
 * documented default (NULL pointers, default tolerances/iteration
 * counts).  Returned as void* per the hypre solver-handle convention;
 * released with hypre_MGRDestroy(). */
void *
hypre_MGRCreate()
{
   hypre_ParMGRData  *mgr_data;

   mgr_data = hypre_CTAlloc(hypre_ParMGRData, 1, HYPRE_MEMORY_HOST);

   /* block data */
   (mgr_data -> block_size) = 1;
   (mgr_data -> block_num_coarse_indexes) = NULL;
   (mgr_data -> point_marker_array) = NULL;
   (mgr_data -> block_cf_marker) = NULL;

   /* general data */
   (mgr_data -> max_num_coarse_levels) = 10;
   (mgr_data -> A_array) = NULL;
#if defined(HYPRE_USING_CUDA)
   (mgr_data -> P_FF_array) = NULL;
#endif
   (mgr_data -> P_array) = NULL;
   (mgr_data -> RT_array) = NULL;
   (mgr_data -> RAP) = NULL;
   (mgr_data -> CF_marker_array) = NULL;
   (mgr_data -> coarse_indices_lvls) = NULL;

   (mgr_data -> A_ff_array) = NULL;
   (mgr_data -> F_fine_array) = NULL;
   (mgr_data -> U_fine_array) = NULL;
   (mgr_data -> aff_solver) = NULL;
   (mgr_data -> fine_grid_solver_setup) = NULL;
   (mgr_data -> fine_grid_solver_solve) = NULL;

   (mgr_data -> F_array) = NULL;
   (mgr_data -> U_array) = NULL;
   (mgr_data -> residual) = NULL;
   (mgr_data -> rel_res_norms) = NULL;
   (mgr_data -> Vtemp) = NULL;
   (mgr_data -> Ztemp) = NULL;
   (mgr_data -> Utemp) = NULL;
   (mgr_data -> Ftemp) = NULL;

   (mgr_data -> num_iterations) = 0;
   (mgr_data -> num_interp_sweeps) = 1;
   (mgr_data -> num_restrict_sweeps) = 1;
   (mgr_data -> trunc_factor) = 0.0;
   (mgr_data -> max_row_sum) = 0.9;
   (mgr_data -> strong_threshold) = 0.25;
   (mgr_data -> P_max_elmts) = 0;

   (mgr_data -> coarse_grid_solver) = NULL;
   (mgr_data -> coarse_grid_solver_setup) = NULL;
   (mgr_data -> coarse_grid_solver_solve) = NULL;

   (mgr_data -> global_smoother) = NULL;

   (mgr_data -> use_default_cgrid_solver) = 1;
   (mgr_data -> fsolver_mode) = -1; // set to -1 to avoid printing when not used
   (mgr_data -> omega) = 1.;
   (mgr_data -> max_iter) = 20;
   (mgr_data -> tol) = 1.0e-6;
   (mgr_data -> relax_type) = 0;
   (mgr_data -> relax_order) = 1; // not fully utilized. Only used to compute L1-norms.
   (mgr_data -> interp_type) = NULL;
   (mgr_data -> restrict_type) = NULL;
   (mgr_data -> num_relax_sweeps) = 1;
   (mgr_data -> relax_weight) = 1.0;

   (mgr_data -> logging) = 0;
   (mgr_data -> print_level) = 0;
   (mgr_data -> frelax_print_level) = 0;
   (mgr_data -> cg_print_level) = 0;

   (mgr_data -> l1_norms) = NULL;

   (mgr_data -> reserved_coarse_size) = 0;
   (mgr_data -> reserved_coarse_indexes) = NULL;
   (mgr_data -> reserved_Cpoint_local_indexes) = NULL;

   (mgr_data -> diaginv) = NULL;
   (mgr_data -> global_smooth_iters) = 1;
   (mgr_data -> global_smooth_type) = 0;

   (mgr_data -> set_non_Cpoints_to_F) = 0;
   (mgr_data -> idx_array) = NULL;

   (mgr_data -> Frelax_method) = NULL;
   (mgr_data -> VcycleRelaxVtemp) = NULL;
   (mgr_data -> VcycleRelaxZtemp) = NULL;
   (mgr_data -> FrelaxVcycleData) = NULL;
   (mgr_data -> Frelax_num_functions) = NULL;
   (mgr_data -> max_local_lvls) = 10;

   (mgr_data -> use_non_galerkin_cg) = NULL;

   (mgr_data -> print_coarse_system) = 0;

   (mgr_data -> set_c_points_method) = 0;
   (mgr_data -> lvl_to_keep_cpoints) = 0;
   (mgr_data -> cg_convergence_factor) = 0.0;

   (mgr_data -> truncate_coarse_grid_threshold) = 0.0;

   return (void *) mgr_data;
}
/*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ /* Destroy */ HYPRE_Int hypre_MGRDestroy( void *data ) { hypre_ParMGRData * mgr_data = (hypre_ParMGRData*) data; HYPRE_Int i; HYPRE_Int num_coarse_levels = (mgr_data -> num_coarse_levels); /* block info data */ if ((mgr_data -> block_cf_marker)) { for (i = 0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); } } hypre_TFree((mgr_data -> block_cf_marker), HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } if (mgr_data -> block_num_coarse_indexes) { hypre_TFree(mgr_data -> block_num_coarse_indexes, HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* final residual vector */ if ((mgr_data -> residual)) { hypre_ParVectorDestroy( (mgr_data -> residual) ); (mgr_data -> residual) = NULL; } if ((mgr_data -> rel_res_norms)) { hypre_TFree( (mgr_data -> rel_res_norms), HYPRE_MEMORY_HOST); (mgr_data -> rel_res_norms) = NULL; } /* temp vectors for solve phase */ if ((mgr_data -> Vtemp)) { hypre_ParVectorDestroy( (mgr_data -> Vtemp) ); (mgr_data -> Vtemp) = NULL; } if ((mgr_data -> Ztemp)) { hypre_ParVectorDestroy( (mgr_data -> Ztemp) ); (mgr_data -> Ztemp) = NULL; } if ((mgr_data -> Utemp)) { hypre_ParVectorDestroy( (mgr_data -> Utemp) ); (mgr_data -> Utemp) = NULL; } if ((mgr_data -> Ftemp)) { hypre_ParVectorDestroy( (mgr_data -> Ftemp) ); (mgr_data -> Ftemp) = NULL; } /* coarse grid solver */ if ((mgr_data -> use_default_cgrid_solver)) { if ((mgr_data -> coarse_grid_solver)) { hypre_BoomerAMGDestroy( (mgr_data -> coarse_grid_solver) ); } (mgr_data -> coarse_grid_solver) = NULL; } /* l1_norms */ if ((mgr_data -> l1_norms)) { for (i = 0; i < (num_coarse_levels); i++) { hypre_SeqVectorDestroy((mgr_data -> l1_norms)[i]); } hypre_TFree((mgr_data -> l1_norms), HYPRE_MEMORY_HOST); } /* 
coarse_indices_lvls */ if ((mgr_data -> coarse_indices_lvls)) { for (i = 0; i < (num_coarse_levels); i++) if ((mgr_data -> coarse_indices_lvls)[i]) { hypre_TFree((mgr_data -> coarse_indices_lvls)[i], HYPRE_MEMORY_HOST); } hypre_TFree((mgr_data -> coarse_indices_lvls), HYPRE_MEMORY_HOST); } /* linear system and cf marker array */ if (mgr_data -> A_array || mgr_data -> P_array || mgr_data -> RT_array || mgr_data -> CF_marker_array) { for (i = 1; i < num_coarse_levels + 1; i++) { hypre_ParVectorDestroy((mgr_data -> F_array)[i]); hypre_ParVectorDestroy((mgr_data -> U_array)[i]); if ((mgr_data -> P_array)[i - 1]) { hypre_ParCSRMatrixDestroy((mgr_data -> P_array)[i - 1]); } if ((mgr_data -> RT_array)[i - 1]) { hypre_ParCSRMatrixDestroy((mgr_data -> RT_array)[i - 1]); } hypre_IntArrayDestroy(mgr_data -> CF_marker_array[i - 1]); } for (i = 1; i < (num_coarse_levels); i++) { if ((mgr_data -> A_array)[i]) { hypre_ParCSRMatrixDestroy((mgr_data -> A_array)[i]); } } } #if defined(HYPRE_USING_CUDA) if (mgr_data -> P_FF_array) { for (i = 0; i < num_coarse_levels; i++) { if ((mgr_data -> P_array)[i]) { hypre_ParCSRMatrixDestroy((mgr_data -> P_FF_array)[i]); } } //hypre_TFree(P_FF_array, hypre_HandleMemoryLocation(hypre_handle())); hypre_TFree((mgr_data -> P_FF_array), HYPRE_MEMORY_HOST); } #endif /* AMG for Frelax */ if (mgr_data -> A_ff_array || mgr_data -> F_fine_array || mgr_data -> U_fine_array) { for (i = 1; i < num_coarse_levels + 1; i++) { if (mgr_data -> F_fine_array[i]) { hypre_ParVectorDestroy((mgr_data -> F_fine_array)[i]); } if (mgr_data -> U_fine_array[i]) { hypre_ParVectorDestroy((mgr_data -> U_fine_array)[i]); } } for (i = 1; i < (num_coarse_levels); i++) { if ((mgr_data -> A_ff_array)[i]) { hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[i]); } } if (mgr_data -> fsolver_mode > 0) { if ((mgr_data -> A_ff_array)[0]) { hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[0]); } } hypre_TFree(mgr_data -> F_fine_array, HYPRE_MEMORY_HOST); (mgr_data -> F_fine_array) = 
NULL; hypre_TFree(mgr_data -> U_fine_array, HYPRE_MEMORY_HOST); (mgr_data -> U_fine_array) = NULL; hypre_TFree(mgr_data -> A_ff_array, HYPRE_MEMORY_HOST); (mgr_data -> A_ff_array) = NULL; } if (mgr_data -> aff_solver) { for (i = 1; i < (num_coarse_levels); i++) { if ((mgr_data -> aff_solver)[i]) { hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[i]); } } if (mgr_data -> fsolver_mode == 2) { if ((mgr_data -> aff_solver)[0]) { hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[0]); } } hypre_TFree(mgr_data -> aff_solver, HYPRE_MEMORY_HOST); (mgr_data -> aff_solver) = NULL; } if ((mgr_data -> F_array)) { hypre_TFree((mgr_data -> F_array), HYPRE_MEMORY_HOST); (mgr_data -> F_array) = NULL; } if ((mgr_data -> U_array)) { hypre_TFree((mgr_data -> U_array), HYPRE_MEMORY_HOST); (mgr_data -> U_array) = NULL; } if ((mgr_data -> A_array)) { hypre_TFree((mgr_data -> A_array), HYPRE_MEMORY_HOST); (mgr_data -> A_array) = NULL; } if ((mgr_data -> P_array)) { hypre_TFree((mgr_data -> P_array), HYPRE_MEMORY_HOST); (mgr_data -> P_array) = NULL; } if ((mgr_data -> RT_array)) { hypre_TFree((mgr_data -> RT_array), HYPRE_MEMORY_HOST); (mgr_data -> RT_array) = NULL; } if ((mgr_data -> CF_marker_array)) { hypre_TFree((mgr_data -> CF_marker_array), HYPRE_MEMORY_HOST); (mgr_data -> CF_marker_array) = NULL; } if ((mgr_data -> reserved_Cpoint_local_indexes)) { hypre_TFree((mgr_data -> reserved_Cpoint_local_indexes), HYPRE_MEMORY_HOST); (mgr_data -> reserved_Cpoint_local_indexes) = NULL; } if (mgr_data -> restrict_type) { hypre_TFree(mgr_data -> restrict_type, HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } if (mgr_data -> interp_type) { hypre_TFree(mgr_data -> interp_type, HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } /* Frelax_method */ if (mgr_data -> Frelax_method) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } /* Frelax_num_functions */ if (mgr_data -> Frelax_num_functions) { hypre_TFree(mgr_data -> Frelax_num_functions, 
HYPRE_MEMORY_HOST);
      (mgr_data -> Frelax_num_functions) = NULL;
   }

   /* data for V-cycle F-relaxation */
   if ((mgr_data -> VcycleRelaxVtemp))
   {
      hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxVtemp) );
      (mgr_data -> VcycleRelaxVtemp) = NULL;
   }
   if ((mgr_data -> VcycleRelaxZtemp))
   {
      hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxZtemp) );
      (mgr_data -> VcycleRelaxZtemp) = NULL;
   }
   /* per-level AMG data used when F-relaxation is a V-cycle */
   if (mgr_data -> FrelaxVcycleData)
   {
      for (i = 0; i < num_coarse_levels; i++)
      {
         if ((mgr_data -> FrelaxVcycleData)[i])
         {
            hypre_MGRDestroyFrelaxVcycleData((mgr_data -> FrelaxVcycleData)[i]);
            (mgr_data -> FrelaxVcycleData)[i] = NULL;
         }
      }
      hypre_TFree(mgr_data -> FrelaxVcycleData, HYPRE_MEMORY_HOST);
      (mgr_data -> FrelaxVcycleData) = NULL;
   }
   /* data for reserved coarse nodes */
   if (mgr_data -> reserved_coarse_indexes)
   {
      hypre_TFree(mgr_data -> reserved_coarse_indexes, HYPRE_MEMORY_HOST);
      (mgr_data -> reserved_coarse_indexes) = NULL;
   }
   /* index array for setting Cpoints by global block */
   if ((mgr_data -> set_c_points_method) == 1)
   {
      hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST);
      (mgr_data -> idx_array) = NULL;
   }
   /* array for setting option to use non-Galerkin coarse grid */
   if (mgr_data -> use_non_galerkin_cg)
   {
      hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST);
      (mgr_data -> use_non_galerkin_cg) = NULL;
   }
   /* coarse level matrix - RAP */
   if ((mgr_data -> RAP))
   {
      hypre_ParCSRMatrixDestroy((mgr_data -> RAP));
   }
   if ((mgr_data -> diaginv))
   {
      hypre_TFree((mgr_data -> diaginv), HYPRE_MEMORY_HOST);
   }
   /* global smoother: smooth type 8 is Euclid, 16 is hypre ILU
    * (other smoother types are presumably freed elsewhere — confirm) */
   if ((mgr_data -> global_smoother))
   {
      if (mgr_data -> global_smooth_type == 8)
      {
         HYPRE_EuclidDestroy((mgr_data -> global_smoother));
      }
      else if (mgr_data -> global_smooth_type == 16)
      {
         HYPRE_ILUDestroy((mgr_data -> global_smoother));
      }
   }
   /* mgr data */
   hypre_TFree(mgr_data, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Create data for V-cycle F-relaxation.
 * Allocates a hypre_ParAMGData object and initializes the subset of fields
 * the MGR F-relaxation V-cycle uses; returned as void*.  Caller owns the
 * result and releases it via hypre_MGRDestroyFrelaxVcycleData. */
void *
hypre_MGRCreateFrelaxVcycleData()
{
   hypre_ParAMGData *vdata = hypre_CTAlloc(hypre_ParAMGData, 1, HYPRE_MEMORY_HOST);

   hypre_ParAMGDataAArray(vdata) = NULL;
   hypre_ParAMGDataPArray(vdata) = NULL;
   hypre_ParAMGDataFArray(vdata) = NULL;
   hypre_ParAMGDataCFMarkerArray(vdata) = NULL;
   hypre_ParAMGDataVtemp(vdata) = NULL;
   hypre_ParAMGDataAMat(vdata) = NULL;
   hypre_ParAMGDataBVec(vdata) = NULL;
   hypre_ParAMGDataZtemp(vdata) = NULL;
   hypre_ParAMGDataCommInfo(vdata) = NULL;
   hypre_ParAMGDataUArray(vdata) = NULL;
   hypre_ParAMGDataNewComm(vdata) = hypre_MPI_COMM_NULL;
   hypre_ParAMGDataNumLevels(vdata) = 0;
   /* defaults for the F-relaxation hierarchy */
   hypre_ParAMGDataMaxLevels(vdata) = 10;
   hypre_ParAMGDataNumFunctions(vdata) = 1;
   hypre_ParAMGDataSCommPkgSwitch(vdata) = 1.0;
   hypre_ParAMGDataRelaxOrder(vdata) = 1;
   hypre_ParAMGDataMaxCoarseSize(vdata) = 9;
   hypre_ParAMGDataMinCoarseSize(vdata) = 0;
   hypre_ParAMGDataUserCoarseRelaxType(vdata) = 9;

   return (void *) vdata;
}

/* Destroy data for V-cycle F-relaxation.
 * Frees per-level matrices/vectors/markers (levels 1..num_levels), the
 * level arrays themselves, and the object allocated by
 * hypre_MGRCreateFrelaxVcycleData. */
HYPRE_Int
hypre_MGRDestroyFrelaxVcycleData( void *data )
{
   hypre_ParAMGData * vdata = (hypre_ParAMGData*) data;
   HYPRE_Int i;
   HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(vdata);
   MPI_Comm new_comm = hypre_ParAMGDataNewComm(vdata);

   hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[0], HYPRE_MEMORY_HOST);
   /* per-level data: A and F/U vectors are indexed 1..num_levels,
    * P and CF markers are indexed 0..num_levels-1 */
   for (i = 1; i < num_levels + 1; i++)
   {
      if (hypre_ParAMGDataAArray(vdata)[i])
      {
         hypre_ParCSRMatrixDestroy(hypre_ParAMGDataAArray(vdata)[i]);
      }
      if (hypre_ParAMGDataPArray(vdata)[i - 1])
      {
         hypre_ParCSRMatrixDestroy(hypre_ParAMGDataPArray(vdata)[i - 1]);
      }
      hypre_IntArrayDestroy(hypre_ParAMGDataCFMarkerArray(vdata)[i - 1]);
      hypre_ParVectorDestroy(hypre_ParAMGDataFArray(vdata)[i]);
      hypre_ParVectorDestroy(hypre_ParAMGDataUArray(vdata)[i]);
      hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[i], HYPRE_MEMORY_HOST);
   }
   /* if no levels were built, the level-0 marker was not freed above */
   if (num_levels < 1)
   {
      hypre_IntArrayDestroy(hypre_ParAMGDataCFMarkerArray(vdata)[0]);
   }

   /* Points to VcycleRelaxVtemp of mgr_data, which is already destroyed */
   //hypre_ParVectorDestroy(hypre_ParAMGDataVtemp(vdata));
   hypre_TFree(hypre_ParAMGDataFArray(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataUArray(vdata), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParAMGDataAArray(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataPArray(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata), HYPRE_MEMORY_HOST);
   //hypre_TFree(hypre_ParAMGDataGridRelaxType(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata), HYPRE_MEMORY_HOST);

   /* Points to VcycleRelaxZtemp of mgr_data, which is already destroyed */
   /*
   if (hypre_ParAMGDataZtemp(vdata))
      hypre_ParVectorDestroy(hypre_ParAMGDataZtemp(vdata));
   */

   if (hypre_ParAMGDataAMat(vdata))
   {
      hypre_TFree(hypre_ParAMGDataAMat(vdata), HYPRE_MEMORY_HOST);
   }
   if (hypre_ParAMGDataBVec(vdata))
   {
      hypre_TFree(hypre_ParAMGDataBVec(vdata), HYPRE_MEMORY_HOST);
   }
   if (hypre_ParAMGDataCommInfo(vdata))
   {
      hypre_TFree(hypre_ParAMGDataCommInfo(vdata), HYPRE_MEMORY_HOST);
   }

   /* release the sub-communicator created for the coarse solve, if any */
   if (new_comm != hypre_MPI_COMM_NULL)
   {
      hypre_MPI_Comm_free (&new_comm);
   }

   hypre_TFree(vdata, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Set C-point variables for each reduction level */
/* Currently not implemented */
/* NOTE(review): stores the caller's pointers directly (no deep copy) —
 * caller retains ownership of num_coarse_points / level_coarse_indexes. */
HYPRE_Int
hypre_MGRSetReductionLevelCpoints( void *mgr_vdata,
                                   HYPRE_Int nlevels,
                                   HYPRE_Int *num_coarse_points,
                                   HYPRE_Int **level_coarse_indexes)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> num_coarse_levels) = nlevels;
   (mgr_data -> num_coarse_per_level) = num_coarse_points;
   (mgr_data -> level_coarse_indexes) = level_coarse_indexes;
   return hypre_error_flag;
}

/* Initialize some data */
/* Set whether non-coarse points on each level should be explicitly tagged as F-points */
HYPRE_Int
hypre_MGRSetNonCpointsToFpoints( void *mgr_vdata, HYPRE_Int nonCptToFptFlag)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> set_non_Cpoints_to_F) = nonCptToFptFlag;
   return hypre_error_flag;
}

/* Set whether the reserved C points are reduced before the coarse grid solve */
HYPRE_Int
hypre_MGRSetReservedCpointsLevelToKeep(void *mgr_vdata, HYPRE_Int level)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> lvl_to_keep_cpoints) = level;
   return hypre_error_flag;
}

/* Set Cpoints by contiguous blocks, i.e. p1, p2, ..., pn, s1, s2, ..., sn, ... */
HYPRE_Int
hypre_MGRSetCpointsByContiguousBlock( void *mgr_vdata,
                                      HYPRE_Int block_size,
                                      HYPRE_Int max_num_levels,
                                      HYPRE_BigInt *begin_idx_array,
                                      HYPRE_Int *block_num_coarse_points,
                                      HYPRE_Int **block_coarse_indexes)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i;
   /* drop any previously stored block start indices */
   if ((mgr_data -> idx_array) != NULL)
   {
      hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST);
      (mgr_data -> idx_array) = NULL;
   }
   /* copy the global begin index of each of the block_size point types */
   HYPRE_BigInt *index_array = hypre_CTAlloc(HYPRE_BigInt, block_size, HYPRE_MEMORY_HOST);
   if (begin_idx_array != NULL)
   {
      for (i = 0; i < block_size; i++)
      {
         index_array[i] = *(begin_idx_array + i);
      }
   }
   /* delegate the CF-marker setup, then record the contiguous-block method.
    * NOTE: overrides set_c_points_method = 0 set by the call below. */
   hypre_MGRSetCpointsByBlock(mgr_data, block_size, max_num_levels,
                              block_num_coarse_points, block_coarse_indexes);
   (mgr_data -> idx_array) = index_array;
   (mgr_data -> set_c_points_method) = 1;
   return hypre_error_flag;
}

/* Initialize/ set local block data information */
HYPRE_Int
hypre_MGRSetCpointsByBlock( void *mgr_vdata,
                            HYPRE_Int block_size,
                            HYPRE_Int max_num_levels,
                            HYPRE_Int *block_num_coarse_points,
                            HYPRE_Int **block_coarse_indexes)
{
   HYPRE_Int i, j;
   HYPRE_Int **block_cf_marker = NULL;
   HYPRE_Int *block_num_coarse_indexes = NULL;

   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   /* free block cf_marker data if not previously destroyed */
   if ((mgr_data -> block_cf_marker) != NULL)
   {
      for (i = 0; i < (mgr_data -> max_num_coarse_levels); i++)
      {
         if ((mgr_data -> block_cf_marker)[i])
         {
            hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
            (mgr_data -> block_cf_marker)[i] = NULL;
         }
      }
      hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST);
      (mgr_data -> block_cf_marker) = NULL;
   }
   if ((mgr_data -> block_num_coarse_indexes))
   {
      hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST);
      (mgr_data -> block_num_coarse_indexes) = NULL;
   }

   /* store block cf_marker */
   block_cf_marker =
hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST);
   for (i = 0; i < max_num_levels; i++)
   {
      block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST);
      /* NOTE(review): memset writes the byte FMRK into every byte of each
       * HYPRE_Int; this only produces the value FMRK per entry for special
       * values (e.g. 0 or -1 on two's complement) — confirm FMRK qualifies. */
      memset(block_cf_marker[i], FMRK, block_size * sizeof(HYPRE_Int));
   }
   /* mark the requested in-block positions as coarse on each level */
   for (i = 0; i < max_num_levels; i++)
   {
      for (j = 0; j < block_num_coarse_points[i]; j++)
      {
         (block_cf_marker[i])[block_coarse_indexes[i][j]] = CMRK;
      }
   }

   /* store block_num_coarse_points */
   if (max_num_levels > 0)
   {
      block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST);
      for (i = 0; i < max_num_levels; i++)
      {
         block_num_coarse_indexes[i] = block_num_coarse_points[i];
      }
   }

   /* set block data */
   (mgr_data -> max_num_coarse_levels) = max_num_levels;
   (mgr_data -> block_size) = block_size;
   (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes;
   (mgr_data -> block_cf_marker) = block_cf_marker;
   (mgr_data -> set_c_points_method) = 0;

   return hypre_error_flag;
}

/* Set per-level C-points from user index lists together with a point-type
 * marker array (set_c_points_method = 2). */
HYPRE_Int
hypre_MGRSetCpointsByPointMarkerArray( void *mgr_vdata,
                                       HYPRE_Int block_size,
                                       HYPRE_Int max_num_levels,
                                       HYPRE_Int *lvl_num_coarse_points,
                                       HYPRE_Int **lvl_coarse_indexes,
                                       HYPRE_Int *point_marker_array)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i, j;
   HYPRE_Int **block_cf_marker = NULL;
   HYPRE_Int *block_num_coarse_indexes = NULL;

   /* free block cf_marker data if not previously destroyed */
   if ((mgr_data -> block_cf_marker) != NULL)
   {
      for (i = 0; i < (mgr_data -> max_num_coarse_levels); i++)
      {
         if ((mgr_data -> block_cf_marker)[i])
         {
            hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
            (mgr_data -> block_cf_marker)[i] = NULL;
         }
      }
      hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST);
      (mgr_data -> block_cf_marker) = NULL;
   }
   if ((mgr_data -> block_num_coarse_indexes))
   {
      hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST);
      (mgr_data -> block_num_coarse_indexes) = NULL;
   }

   /* store block cf_marker */
   block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels,
                                   HYPRE_MEMORY_HOST);
   for (i = 0; i < max_num_levels; i++)
   {
      block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST);
      memset(block_cf_marker[i], FMRK, block_size * sizeof(HYPRE_Int));
   }
   /* NOTE(review): unlike hypre_MGRSetCpointsByBlock, this copies the
    * coarse marker VALUES into slots 0..lvl_num_coarse_points[i]-1 rather
    * than writing CMRK at the given indexes — confirm this asymmetry is
    * intended for the marker-array method. */
   for (i = 0; i < max_num_levels; i++)
   {
      for (j = 0; j < lvl_num_coarse_points[i]; j++)
      {
         block_cf_marker[i][j] = lvl_coarse_indexes[i][j];
      }
   }

   /* store block_num_coarse_points */
   if (max_num_levels > 0)
   {
      block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST);
      for (i = 0; i < max_num_levels; i++)
      {
         block_num_coarse_indexes[i] = lvl_num_coarse_points[i];
      }
   }

   /* set block data */
   (mgr_data -> max_num_coarse_levels) = max_num_levels;
   (mgr_data -> block_size) = block_size;
   (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes;
   (mgr_data -> block_cf_marker) = block_cf_marker;
   (mgr_data -> point_marker_array) = point_marker_array;
   (mgr_data -> set_c_points_method) = 2;

   return hypre_error_flag;
}

/*Set number of points that remain part of the coarse grid throughout the hierarchy */
HYPRE_Int
hypre_MGRSetReservedCoarseNodes(void *mgr_vdata,
                                HYPRE_Int reserved_coarse_size,
                                HYPRE_BigInt *reserved_cpt_index)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_BigInt *reserved_coarse_indexes = NULL;
   HYPRE_Int i;

   if (!mgr_data)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Warning! MGR object empty!\n");
      return hypre_error_flag;
   }

   if (reserved_coarse_size < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }
   /* free data not previously destroyed */
   if ((mgr_data -> reserved_coarse_indexes))
   {
      hypre_TFree((mgr_data -> reserved_coarse_indexes), HYPRE_MEMORY_HOST);
      (mgr_data -> reserved_coarse_indexes) = NULL;
   }

   /* set reserved coarse nodes (deep copy of the caller's index list) */
   if (reserved_coarse_size > 0)
   {
      reserved_coarse_indexes = hypre_CTAlloc(HYPRE_BigInt, reserved_coarse_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < reserved_coarse_size; i++)
      {
         reserved_coarse_indexes[i] = reserved_cpt_index[i];
      }
   }
   (mgr_data -> reserved_coarse_size) = reserved_coarse_size;
   (mgr_data -> reserved_coarse_indexes) = reserved_coarse_indexes;

   return hypre_error_flag;
}

/* Set CF marker array */
/* Computes the CF splitting for one MGR level: on the last level (cflag)
 * only the fixed coarse set is marked; otherwise an AMG coarsening of S is
 * computed and then corrected so the fixed coarse points stay coarse. */
HYPRE_Int
hypre_MGRCoarsen(hypre_ParCSRMatrix *S,
                 hypre_ParCSRMatrix *A,
                 HYPRE_Int fixed_coarse_size,
                 HYPRE_Int *fixed_coarse_indexes,
                 HYPRE_Int debug_flag,
                 hypre_IntArray **CF_marker_ptr,
                 HYPRE_Int cflag)
{
   HYPRE_Int *CF_marker = NULL;
   HYPRE_Int *cindexes = fixed_coarse_indexes;
   HYPRE_Int i, row, nc;
   HYPRE_Int nloc = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));

   /* If this is the last level, coarsen onto fixed coarse set */
   if (cflag)
   {
      if (*CF_marker_ptr != NULL)
      {
         hypre_IntArrayDestroy(*CF_marker_ptr);
      }
      *CF_marker_ptr = hypre_IntArrayCreate(nloc);
      hypre_IntArrayInitialize(*CF_marker_ptr);
      hypre_IntArraySetConstantValues(*CF_marker_ptr, FMRK);
      CF_marker = hypre_IntArrayData(*CF_marker_ptr);

      /* first mark fixed coarse set */
      nc = fixed_coarse_size;
      for (i = 0; i < nc; i++)
      {
         CF_marker[cindexes[i]] = CMRK;
      }
   }
   else
   {
      /* First coarsen to get initial CF splitting.
       * This is then followed by updating the CF marker to pass
       * coarse information to the next levels.
NOTE: It may be
       * convenient to implement this way (allows the use of multiple
       * coarsening strategies without changing too much code),
       * but not necessarily the best option, compared to initializing
       * CF_marker first and then coarsening on subgraph which excludes
       * the initialized coarse nodes. */
      hypre_BoomerAMGCoarsen(S, A, 0, debug_flag, CF_marker_ptr);
      CF_marker = hypre_IntArrayData(*CF_marker_ptr);

      /* Update CF_marker to correct Cpoints marked as Fpoints. */
      nc = fixed_coarse_size;
      for (i = 0; i < nc; i++)
      {
         CF_marker[cindexes[i]] = CMRK;
      }
      /* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate
       * between type of F-points (example Ruge coarsening). We do not need that distinction here.
       */
      for (row = 0; row < nloc; row++)
      {
         if (CF_marker[row] == CMRK) { continue; }
         CF_marker[row] = FMRK;
      }
#if 0
      /* IMPORTANT: Update coarse_indexes array to define the positions of the fixed coarse points
       * in the next level.
       */
      nc = 0;
      index_i = 0;
      for (row = 0; row < nloc; row++)
      {
         /* loop through new c-points */
         if (CF_marker[row] == CMRK) { nc++; }
         else if (CF_marker[row] == S_CMRK)
         {
            /* previously marked c-point is part of fixed coarse set. Track its current local index */
            cindexes[index_i++] = nc;
            /* reset c-point from S_CMRK to CMRK */
            cf_marker[row] = CMRK;
            nc++;
         }
         /* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate
          * between type of F-points (example Ruge coarsening). We do not need that distinction here.
          */
         else
         {
            CF_marker[row] = FMRK;
         }
      }
      /* check if this should be last level */
      if ( nc == fixed_coarse_size)
      {
         last_level = 1;
      }
      //printf(" nc = %d and fixed coarse size = %d \n", nc, fixed_coarse_size);
#endif
   }
   return hypre_error_flag;
}

/* Expand the F-row-only W = -Aff^{-1}Afc data into full interpolation rows:
 * C-points receive an identity row, F-points copy their W row.  Fills the
 * caller-allocated P_diag_i/j/data and P_offd_i arrays (host memory). */
HYPRE_Int
hypre_ExtendWtoPHost(HYPRE_Int P_nr_of_rows,
                     HYPRE_Int *CF_marker,
                     HYPRE_Int *W_diag_i,
                     HYPRE_Int *W_diag_j,
                     HYPRE_Complex *W_diag_data,
                     HYPRE_Int *P_diag_i,
                     HYPRE_Int *P_diag_j,
                     HYPRE_Complex *P_diag_data,
                     HYPRE_Int *W_offd_i,
                     HYPRE_Int *P_offd_i )
{
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int *fine_to_coarse = NULL;
   HYPRE_Int coarse_counter;
   HYPRE_Int i, jj;
   HYPRE_Real one = 1.0;

   /*-----------------------------------------------------------------------
    *  Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, P_nr_of_rows, HYPRE_MEMORY_HOST);
   for (i = 0; i < P_nr_of_rows; i++)
   {
      fine_to_coarse[i] = -1;
   }

   /*-----------------------------------------------------------------------
    *  Loop over fine grid.
    *-----------------------------------------------------------------------*/
   HYPRE_Int row_counter = 0;
   coarse_counter = 0;
   for (i = 0; i < P_nr_of_rows; i++)
   {
      /*--------------------------------------------------------------------
       *  If i is a C-point, interpolation is the identity. Also set up
       *  mapping vector.
       *--------------------------------------------------------------------*/
      /* NOTE(review): this pass counts C-points with CF_marker[i] > 0 while
       * the fill pass below tests >= 0; a marker value of exactly 0 would be
       * treated inconsistently — confirm markers are never 0 here. */
      if (CF_marker[i] > 0)
      {
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }
   }

   /*-----------------------------------------------------------------------
    *  Initialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   row_counter = 0;

   for (i = 0; i < P_nr_of_rows; i++)
   {
      /*--------------------------------------------------------------------
       *  If i is a c-point, interpolation is the identity.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         P_diag_i[i] = jj_counter;
         P_diag_j[jj_counter] = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }
      /*--------------------------------------------------------------------
       *  If i is an F-point, build interpolation.
       *--------------------------------------------------------------------*/
      else
      {
         /* Diagonal part of P */
         P_diag_i[i] = jj_counter;
         for (jj = W_diag_i[row_counter]; jj < W_diag_i[row_counter + 1]; jj++)
         {
            //P_marker[row_counter] = jj_counter;
            P_diag_j[jj_counter] = W_diag_j[jj];
            P_diag_data[jj_counter] = W_diag_data[jj];
            jj_counter++;
         }

         /* Off-Diagonal part of P: only the row pointer is built here; the
          * off-diagonal j/data arrays are taken over from W by the caller */
         P_offd_i[i] = jj_counter_offd;
         jj_counter_offd += W_offd_i[row_counter + 1] - W_offd_i[row_counter];
         row_counter++;
      }
      /* update off-diagonal row pointer */
      P_offd_i[i + 1] = jj_counter_offd;
   }
   P_diag_i[P_nr_of_rows] = jj_counter;

   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);

   return 0;
}

/* Interpolation for MGR - Adapted from BoomerAMGBuildInterp */
HYPRE_Int
hypre_MGRBuildPHost( hypre_ParCSRMatrix *A,
                     HYPRE_Int *CF_marker,
                     HYPRE_BigInt *num_cpts_global,
                     HYPRE_Int method,
                     hypre_ParCSRMatrix **P_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Int num_procs, my_id;
   HYPRE_Int A_nr_of_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_ParCSRMatrix *A_FF = NULL, *A_FC = NULL, *P = NULL;
   hypre_CSRMatrix *W_diag = NULL, *W_offd = NULL;
   HYPRE_Int P_diag_nnz, nfpoints;
   HYPRE_Int *P_diag_i = NULL, *P_diag_j = NULL, *P_offd_i = NULL;
   HYPRE_Complex *P_diag_data = NULL, *diag = NULL, *diag1 = NULL;
   HYPRE_BigInt nC_global;
   HYPRE_Int i;

   HYPRE_MemoryLocation memory_location_P =
hypre_ParCSRMatrixMemoryLocation(A);

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* count local F-points (CF_marker == -1) */
   nfpoints = 0;
   for (i = 0; i < A_nr_of_rows; i++)
   {
      if (CF_marker[i] == -1)
      {
         nfpoints++;
      }
   }

   if (method > 0)
   {
      /* split A into its F/F and F/C blocks */
      hypre_ParCSRMatrixGenerateFFFC(A, CF_marker, num_cpts_global, NULL, &A_FC, &A_FF);
      diag = hypre_CTAlloc(HYPRE_Complex, nfpoints, memory_location_P);
      if (method == 1)
      {
         // extract diag inverse sqrt
         // hypre_CSRMatrixExtractDiagonalHost(hypre_ParCSRMatrixDiag(A_FF), diag, 3);

         // L1-Jacobi-type interpolation
         HYPRE_Complex scal = 1.0;
         diag1 = hypre_CTAlloc(HYPRE_Complex, nfpoints, memory_location_P);
         hypre_CSRMatrixExtractDiagonalHost(hypre_ParCSRMatrixDiag(A_FF), diag, 0);
         hypre_CSRMatrixComputeRowSumHost(hypre_ParCSRMatrixDiag(A_FF), NULL, NULL, diag1, 1, 1.0, "set");
         hypre_CSRMatrixComputeRowSumHost(hypre_ParCSRMatrixDiag(A_FC), NULL, NULL, diag1, 1, 1.0, "add");
         hypre_CSRMatrixComputeRowSumHost(hypre_ParCSRMatrixOffd(A_FF), NULL, NULL, diag1, 1, 1.0, "add");
         hypre_CSRMatrixComputeRowSumHost(hypre_ParCSRMatrixOffd(A_FC), NULL, NULL, diag1, 1, 1.0, "add");

         /* L1-Jacobi diagonal: d_i + (rowsum_i - |d_i|), then invert */
         for (i = 0; i < nfpoints; i++)
         {
            HYPRE_Complex dsum = diag[i] + scal * (diag1[i] - hypre_cabs(diag[i]));
            diag[i] = 1. / dsum;
         }
         hypre_TFree(diag1, memory_location_P);
      }
      else if (method == 2)
      {
         // extract diag inverse
         hypre_CSRMatrixExtractDiagonalHost(hypre_ParCSRMatrixDiag(A_FF), diag, 2);
      }

      for (i = 0; i < nfpoints; i++)
      {
         diag[i] = -diag[i];
      }

      /* scale A_FC rows by -D^{-1}, i.e. build W = -D^{-1} A_FC in place */
      hypre_Vector *D_FF_inv = hypre_SeqVectorCreate(nfpoints);
      hypre_VectorData(D_FF_inv) = diag;
      hypre_SeqVectorInitialize_v2(D_FF_inv, memory_location_P);
      hypre_CSRMatrixDiagScale(hypre_ParCSRMatrixDiag(A_FC), D_FF_inv, NULL);
      hypre_CSRMatrixDiagScale(hypre_ParCSRMatrixOffd(A_FC), D_FF_inv, NULL);
      hypre_SeqVectorDestroy(D_FF_inv);
      W_diag = hypre_ParCSRMatrixDiag(A_FC);
      W_offd = hypre_ParCSRMatrixOffd(A_FC);
      nC_global = hypre_ParCSRMatrixGlobalNumCols(A_FC);
   }
   else
   {
      /* method <= 0: W is empty, i.e. injection-style interpolation */
      W_diag = hypre_CSRMatrixCreate(nfpoints, A_nr_of_rows - nfpoints, 0);
      W_offd = hypre_CSRMatrixCreate(nfpoints, 0, 0);
      hypre_CSRMatrixInitialize_v2(W_diag, 0, memory_location_P);
      hypre_CSRMatrixInitialize_v2(W_offd, 0, memory_location_P);

      if (my_id == (num_procs - 1))
      {
         nC_global = num_cpts_global[1];
      }
      hypre_MPI_Bcast(&nC_global, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
   }

   /* Construct P from matrix product W_diag */
   P_diag_nnz = hypre_CSRMatrixNumNonzeros(W_diag) + hypre_CSRMatrixNumCols(W_diag);
   P_diag_i = hypre_CTAlloc(HYPRE_Int, A_nr_of_rows + 1, memory_location_P);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_nnz, memory_location_P);
   P_diag_data = hypre_CTAlloc(HYPRE_Complex, P_diag_nnz, memory_location_P);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, A_nr_of_rows + 1, memory_location_P);

   /* Extend W data to P data */
   hypre_ExtendWtoPHost( A_nr_of_rows, CF_marker,
                         hypre_CSRMatrixI(W_diag),
                         hypre_CSRMatrixJ(W_diag),
                         hypre_CSRMatrixData(W_diag),
                         P_diag_i,
                         P_diag_j,
                         P_diag_data,
                         hypre_CSRMatrixI(W_offd),
                         P_offd_i );

   // finalize P
   P = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A),
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                nC_global,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                hypre_CSRMatrixNumCols(W_offd),
                                P_diag_nnz,
                                hypre_CSRMatrixNumNonzeros(W_offd) );

   hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(P)) = memory_location_P;
   hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(P)) = memory_location_P;

   hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(P)) = P_diag_i;
   hypre_CSRMatrixJ(hypre_ParCSRMatrixDiag(P)) = P_diag_j;
   hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(P)) = P_diag_data;

   hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(P)) = P_offd_i;
   /* steal W_offd's j/data arrays into P, then detach them from W_offd */
   hypre_CSRMatrixJ(hypre_ParCSRMatrixOffd(P)) = hypre_CSRMatrixJ(W_offd);
   hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(P)) = hypre_CSRMatrixData(W_offd);
   hypre_CSRMatrixJ(W_offd) = NULL;
   hypre_CSRMatrixData(W_offd) = NULL;

   if (method > 0)
   {
      /* NOTE(review): the col-map transfer and NULL-ing statements below are
       * each duplicated — harmless but redundant; kept as in the original. */
      hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(A_FC);
      hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(A_FC);
      hypre_ParCSRMatrixColMapOffd(A_FC) = NULL;
      hypre_ParCSRMatrixColMapOffd(A_FC) = NULL;
      hypre_ParCSRMatrixNumNonzeros(P) = hypre_ParCSRMatrixNumNonzeros( A_FC) +
                                         hypre_ParCSRMatrixGlobalNumCols(A_FC);
   }
   else
   {
      hypre_ParCSRMatrixNumNonzeros(P) = nC_global;
   }
   hypre_ParCSRMatrixDNumNonzeros(P) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(P);
   hypre_MatvecCommPkgCreate(P);

   *P_ptr = P;

   if (A_FF)
   {
      hypre_ParCSRMatrixDestroy(A_FF);
   }
   if (A_FC)
   {
      hypre_ParCSRMatrixDestroy(A_FC);
   }
   if (method <= 0)
   {
      hypre_CSRMatrixDestroy(W_diag);
      hypre_CSRMatrixDestroy(W_offd);
   }

   return hypre_error_flag;
}

/* Interpolation for MGR - Adapted from BoomerAMGBuildInterp */
HYPRE_Int
hypre_MGRBuildP( hypre_ParCSRMatrix *A,
                 HYPRE_Int *CF_marker,
                 HYPRE_BigInt *num_cpts_global,
                 HYPRE_Int method,
                 HYPRE_Int debug_flag,
                 hypre_ParCSRMatrix **P_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real
*A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real *a_diag; hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; // HYPRE_Int jj_begin_row,jj_begin_row_offd; // HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; //HYPRE_BigInt *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; //HYPRE_BigInt my_first_cpt; HYPRE_Int num_cols_P_offd; HYPRE_Int i, i1; HYPRE_Int j, jl, jj; HYPRE_Int start; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); //num_threads = hypre_NumThreads(); // Temporary fix, disable threading // TODO: enable threading num_threads = 1; //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } if 
(debug_flag == 4) { wall_time = time_getWallclockSeconds(); } if (num_cols_A_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); } if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; } jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. 
*--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ else { for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; if ((CF_marker[i1] >= 0) && (method > 0)) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { i1 = A_offd_j[jj]; if ((CF_marker_offd[i1] >= 0) && (method > 0)) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i = 0; i < num_threads - 1; i++) { coarse_counter[i + 1] += coarse_counter[i]; jj_count[i + 1] += jj_count[i]; jj_count_offd[i + 1] += jj_count_offd[i]; } i = num_threads - 1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE); /*----------------------------------------------------------------------- * Intialize some stuff. 
*-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. *-----------------------------------------------------------------------*/ if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); } //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) { coarse_shift = coarse_counter[j - 1]; } size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { fine_to_coarse[i] += coarse_shift; } } /* index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) big_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]+ my_first_cpt; } comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); */ if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; 
/*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { if (CF_marker[i] < 0) { for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; if ( i == i1 ) /* diagonal of A only */ { a_diag[i] = 1.0 / A_diag_data[jj]; } } } } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (jl < rest) { ns = jl * size + jl; ne = (jl + 1) * size + jl + 1; } else { ns = jl * size + rest; ne = (jl + 1) * size + rest; } jj_counter = 0; if (jl > 0) { jj_counter = jj_count[jl - 1]; } jj_counter_offd = 0; if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; } P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); } else { P_marker_offd = NULL; } for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if ((CF_marker[i1] >= 0) && (method > 0)) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; /* if(method == 0) { P_diag_data[jj_counter] = 0.0; } */ if (method == 1) { P_diag_data[jj_counter] = - A_diag_data[jj]; } else if (method == 2) { P_diag_data[jj_counter] = - A_diag_data[jj] * a_diag[i]; } jj_counter++; } } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { i1 = A_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. 
*-----------------------------------------------------------*/ if ((CF_marker_offd[i1] >= 0) && (method > 0)) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; /* if(method == 0) { P_offd_data[jj_counter_offd] = 0.0; } */ if (method == 1) { P_offd_data[jj_counter_offd] = - A_offd_data[jj]; } else if (method == 2) { P_offd_data[jj_counter_offd] = - A_offd_data[jj] * a_diag[i]; } jj_counter_offd++; } } } } P_offd_i[i + 1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } hypre_TFree(a_diag, HYPRE_MEMORY_HOST); P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < num_cols_A_offd; i++) { P_marker[i] = 0; } num_cols_P_offd = 0; for (i = 0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_cols_P_offd; i++) { while (P_marker[index] == 0) { index++; } tmp_map_offd[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < P_offd_size; i++) P_offd_j[i] = 
hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } for (i = 0; i < n_fine; i++) if (CF_marker[i] == -3) { CF_marker[i] = -1; } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); //hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); return (0); } /* Interpolation for MGR - Dynamic Row Sum method */ HYPRE_Int hypre_MGRBuildPDRS( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real *a_diag; hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, 
P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; // HYPRE_Int jj_begin_row,jj_begin_row_offd; // HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; //HYPRE_BigInt *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; //HYPRE_BigInt my_first_cpt; HYPRE_Int num_cols_P_offd; HYPRE_Int i, i1; HYPRE_Int j, jl, jj; HYPRE_Int start; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); //num_threads = hypre_NumThreads(); // Temporary fix, disable threading // TODO: enable threading num_threads = 1; //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); } if (num_cols_A_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); } if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < 
hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; } jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. 
*-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ else { for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } /*-------------------------------------------------------------------- * Set up the indexes for the DRS method *--------------------------------------------------------------------*/ } } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ for (i = 0; i < num_threads - 1; i++) { coarse_counter[i + 1] += coarse_counter[i]; jj_count[i + 1] += jj_count[i]; jj_count_offd[i + 1] += jj_count_offd[i]; } i = num_threads - 1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); } //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) { coarse_shift = coarse_counter[j - 1]; } size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { fine_to_coarse[i] += coarse_shift; } } /*index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); */ if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; /*----------------------------------------------------------------------- * Loop over fine grid points. 
*-----------------------------------------------------------------------*/ a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; if ( i == i1 ) /* diagonal of A only */ { a_diag[i] = 1.0 / A_diag_data[jj]; } } } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (jl < rest) { ns = jl * size + jl; ne = (jl + 1) * size + jl + 1; } else { ns = jl * size + rest; ne = (jl + 1) * size + rest; } jj_counter = 0; if (jl > 0) { jj_counter = jj_count[jl - 1]; } jj_counter_offd = 0; if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; } P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); } else { P_marker_offd = NULL; } for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = - A_diag_data[jj] * a_diag[i]; jj_counter++; } } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { i1 = A_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = - A_offd_data[jj] * a_diag[i]; jj_counter_offd++; } } } } P_offd_i[i + 1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } hypre_TFree(a_diag, HYPRE_MEMORY_HOST); P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, 
HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < num_cols_A_offd; i++) { P_marker[i] = 0; } num_cols_P_offd = 0; for (i = 0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_cols_P_offd; i++) { while (P_marker[index] == 0) { index++; } tmp_map_offd[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } for (i = 0; i < n_fine; i++) if (CF_marker[i] == -3) { CF_marker[i] = -1; } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); // hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); return (0); } /* Scale ParCSR matrix A = scalar * A * A: the target CSR matrix * vector: array of real numbers */ HYPRE_Int hypre_ParCSRMatrixLeftScale(HYPRE_Real *vector, hypre_ParCSRMatrix *A) { HYPRE_Int i, j, n_local; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = 
hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); n_local = hypre_CSRMatrixNumRows(A_diag); for (i = 0; i < n_local; i++) { HYPRE_Real factor = vector[i]; for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { A_diag_data[j] *= factor; } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { A_offd_data[j] *= factor; } } return (0); } /************************************************************ * Available methods: * 0: inv(A_FF) approximated by its diagonal inverse * 1: inv(A_FF) approximated by sparse approximate inverse *************************************************************/ HYPRE_Int hypre_MGRComputeNonGalerkinCoarseGrid(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *P, hypre_ParCSRMatrix *RT, HYPRE_Int bsize, HYPRE_Int ordering, HYPRE_Int method, HYPRE_Int Pmax, HYPRE_Int keep_stencil, HYPRE_Int *CF_marker, hypre_ParCSRMatrix **A_h_ptr) { HYPRE_Int *c_marker, *f_marker; HYPRE_Int n_local_fine_grid, i, i1, jj; hypre_ParCSRMatrix *A_cc; hypre_ParCSRMatrix *A_ff; hypre_ParCSRMatrix *A_fc; hypre_ParCSRMatrix *A_cf; hypre_ParCSRMatrix *A_h; hypre_ParCSRMatrix *A_h_correction; HYPRE_Int max_elmts = Pmax; // HYPRE_Real wall_time = 0.; hypre_ParCSRMatrix *P_mod = NULL; HYPRE_Int my_id; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_MPI_Comm_rank(comm, &my_id); HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A); n_local_fine_grid = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); c_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST); f_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST); for (i = 0; i < n_local_fine_grid; i++) { HYPRE_Int point_type = CF_marker[i]; hypre_assert(point_type == 1 || point_type == -1); c_marker[i] = point_type; f_marker[i] = -point_type; } // get the A_cc sub-block hypre_MGRGetSubBlock(A, c_marker, c_marker, 0, &A_cc); if (method == 0) { if (keep_stencil) { //wall_time = time_getWallclockSeconds(); hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf); 
hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc); hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff); // extract the diagonal of A_ff and compute D_ff_inv hypre_CSRMatrix *A_ff_diag = hypre_ParCSRMatrixDiag(A_ff); HYPRE_Real *A_ff_diag_data = hypre_CSRMatrixData(A_ff_diag); HYPRE_Int *A_ff_diag_i = hypre_CSRMatrixI(A_ff_diag); HYPRE_Int *A_ff_diag_j = hypre_CSRMatrixJ(A_ff_diag); HYPRE_Int n_local_fpoints = hypre_CSRMatrixNumRows(A_ff_diag); HYPRE_Real *D_ff_inv; D_ff_inv = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST); for (i = 0; i < n_local_fpoints; i++) { for (jj = A_ff_diag_i[i]; jj < A_ff_diag_i[i + 1]; jj++) { i1 = A_ff_diag_j[jj]; if ( i == i1 ) { D_ff_inv[i] = -1.0 / A_ff_diag_data[jj]; } } } // extract the diagonal of A_cf hypre_CSRMatrix *A_cf_diag = hypre_ParCSRMatrixDiag(A_cf); HYPRE_Real *A_cf_diag_data = hypre_CSRMatrixData(A_cf_diag); HYPRE_Int *A_cf_diag_i = hypre_CSRMatrixI(A_cf_diag); HYPRE_Int *A_cf_diag_j = hypre_CSRMatrixJ(A_cf_diag); n_local_fpoints = hypre_CSRMatrixNumRows(A_cf_diag); HYPRE_Real *D_cf; D_cf = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST); for (i = 0; i < n_local_fpoints; i++) { i1 = A_cf_diag_j[A_cf_diag_i[i]]; D_cf[i] = A_cf_diag_data[jj]; } // compute the triple product hypre_ParCSRMatrixLeftScale(D_ff_inv, A_fc); hypre_ParCSRMatrixLeftScale(D_cf, A_fc); A_h_correction = A_fc; hypre_TFree(D_cf, HYPRE_MEMORY_HOST); hypre_TFree(D_ff_inv, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDestroy(A_ff); hypre_ParCSRMatrixDestroy(A_cf); //wall_time = time_getWallclockSeconds() - wall_time; //hypre_printf("Compute triple product D_cf * D_ff_inv * A_fc time: %1.5f\n", wall_time); } else { //wall_time = time_getWallclockSeconds(); P_mod = hypre_ParCSRMatrixCompleteClone(P); hypre_ParCSRMatrixCopy(P, P_mod, 1); HYPRE_Int n_local_rows = hypre_ParCSRMatrixNumRows(P_mod); hypre_CSRMatrix *P_mod_diag = hypre_ParCSRMatrixDiag(P_mod); HYPRE_Int *P_mod_diag_i = hypre_CSRMatrixI(P_mod_diag); HYPRE_Real 
*P_mod_diag_data = hypre_CSRMatrixData(P_mod_diag); for (i = 0; i < n_local_rows; i ++) { if (CF_marker[i] >= 0) { HYPRE_Int ii = P_mod_diag_i[i]; P_mod_diag_data[ii] = 0.0; } } hypre_BoomerAMGBuildCoarseOperator(RT, A, P_mod, &A_h_correction); //wall_time = time_getWallclockSeconds() - wall_time; //hypre_printf("Compute triple product time new: %1.5f\n", wall_time); hypre_ParCSRMatrixDestroy(P_mod); } } else { // Approximate inverse for ideal interploation hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf); hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc); hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff); hypre_ParCSRMatrix *A_ff_inv = NULL; hypre_ParCSRMatrix *minus_Wp = NULL; hypre_MGRApproximateInverse(A_ff, &A_ff_inv); minus_Wp = hypre_ParMatmul(A_ff_inv, A_fc); A_h_correction = hypre_ParMatmul(A_cf, minus_Wp); hypre_ParCSRMatrixDestroy(minus_Wp); hypre_ParCSRMatrixDestroy(A_ff); hypre_ParCSRMatrixDestroy(A_fc); hypre_ParCSRMatrixDestroy(A_cf); } // perform dropping for A_h_correction // specific to multiphase poromechanics // we only keep the diagonal of each block //wall_time = time_getWallclockSeconds(); HYPRE_Int n_local_cpoints = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_h_correction)); hypre_CSRMatrix *A_h_correction_diag = hypre_ParCSRMatrixDiag(A_h_correction); HYPRE_Real *A_h_correction_diag_data = hypre_CSRMatrixData(A_h_correction_diag); HYPRE_Int *A_h_correction_diag_i = hypre_CSRMatrixI(A_h_correction_diag); HYPRE_Int *A_h_correction_diag_j = hypre_CSRMatrixJ(A_h_correction_diag); HYPRE_Int ncol_diag = hypre_CSRMatrixNumCols(A_h_correction_diag); hypre_CSRMatrix *A_h_correction_offd = hypre_ParCSRMatrixOffd(A_h_correction); HYPRE_Real *A_h_correction_offd_data = hypre_CSRMatrixData(A_h_correction_offd); HYPRE_Int *A_h_correction_offd_i = hypre_CSRMatrixI(A_h_correction_offd); HYPRE_Int *A_h_correction_offd_j = hypre_CSRMatrixJ(A_h_correction_offd); if (Pmax > 0) { if (ordering == 0) // interleaved ordering { HYPRE_Int 
*A_h_correction_diag_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints + 1, memory_location); HYPRE_Int *A_h_correction_diag_j_new = hypre_CTAlloc(HYPRE_Int, (bsize + max_elmts) * n_local_cpoints, memory_location); HYPRE_Complex *A_h_correction_diag_data_new = hypre_CTAlloc(HYPRE_Complex, (bsize + max_elmts) * n_local_cpoints, memory_location); HYPRE_Int num_nonzeros_diag_new = 0; HYPRE_Int *A_h_correction_offd_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints + 1, memory_location); HYPRE_Int *A_h_correction_offd_j_new = hypre_CTAlloc(HYPRE_Int, max_elmts * n_local_cpoints, memory_location); HYPRE_Complex *A_h_correction_offd_data_new = hypre_CTAlloc(HYPRE_Complex, max_elmts * n_local_cpoints, memory_location); HYPRE_Int num_nonzeros_offd_new = 0; for (i = 0; i < n_local_cpoints; i++) { HYPRE_Int max_num_nonzeros = A_h_correction_diag_i[i + 1] - A_h_correction_diag_i[i] + A_h_correction_offd_i[i + 1] - A_h_correction_offd_i[i]; HYPRE_Int *aux_j = hypre_CTAlloc(HYPRE_Int, max_num_nonzeros, HYPRE_MEMORY_HOST); HYPRE_Real *aux_data = hypre_CTAlloc(HYPRE_Real, max_num_nonzeros, HYPRE_MEMORY_HOST); HYPRE_Int row_start = i - (i % bsize); HYPRE_Int row_stop = row_start + bsize - 1; HYPRE_Int cnt = 0; for (jj = A_h_correction_offd_i[i]; jj < A_h_correction_offd_i[i + 1]; jj++) { aux_j[cnt] = A_h_correction_offd_j[jj] + ncol_diag; aux_data[cnt] = A_h_correction_offd_data[jj]; cnt++; } for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i + 1]; jj++) { aux_j[cnt] = A_h_correction_diag_j[jj]; aux_data[cnt] = A_h_correction_diag_data[jj]; cnt++; } hypre_qsort2_abs(aux_j, aux_data, 0, cnt - 1); for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i + 1]; jj++) { i1 = A_h_correction_diag_j[jj]; if (i1 >= row_start && i1 <= row_stop) { // copy data to new arrays A_h_correction_diag_j_new[num_nonzeros_diag_new] = i1; A_h_correction_diag_data_new[num_nonzeros_diag_new] = A_h_correction_diag_data[jj]; ++num_nonzeros_diag_new; } else { // Do nothing } } if 
(max_elmts > 0) { for (jj = 0; jj < hypre_min(max_elmts, cnt); jj++) { HYPRE_Int col_idx = aux_j[jj]; HYPRE_Real col_value = aux_data[jj]; if (col_idx < ncol_diag && (col_idx < row_start || col_idx > row_stop)) { A_h_correction_diag_j_new[num_nonzeros_diag_new] = col_idx; A_h_correction_diag_data_new[num_nonzeros_diag_new] = col_value; ++num_nonzeros_diag_new; } else if (col_idx >= ncol_diag) { A_h_correction_offd_j_new[num_nonzeros_offd_new] = col_idx - ncol_diag; A_h_correction_offd_data_new[num_nonzeros_offd_new] = col_value; ++num_nonzeros_offd_new; } } } A_h_correction_diag_i_new[i + 1] = num_nonzeros_diag_new; A_h_correction_offd_i_new[i + 1] = num_nonzeros_offd_new; hypre_TFree(aux_j, HYPRE_MEMORY_HOST); hypre_TFree(aux_data, HYPRE_MEMORY_HOST); } hypre_TFree(A_h_correction_diag_i, memory_location); hypre_TFree(A_h_correction_diag_j, memory_location); hypre_TFree(A_h_correction_diag_data, memory_location); hypre_CSRMatrixI(A_h_correction_diag) = A_h_correction_diag_i_new; hypre_CSRMatrixJ(A_h_correction_diag) = A_h_correction_diag_j_new; hypre_CSRMatrixData(A_h_correction_diag) = A_h_correction_diag_data_new; hypre_CSRMatrixNumNonzeros(A_h_correction_diag) = num_nonzeros_diag_new; if (A_h_correction_offd_i) { hypre_TFree(A_h_correction_offd_i, memory_location); } if (A_h_correction_offd_j) { hypre_TFree(A_h_correction_offd_j, memory_location); } if (A_h_correction_offd_data) { hypre_TFree(A_h_correction_offd_data, memory_location); } hypre_CSRMatrixI(A_h_correction_offd) = A_h_correction_offd_i_new; hypre_CSRMatrixJ(A_h_correction_offd) = A_h_correction_offd_j_new; hypre_CSRMatrixData(A_h_correction_offd) = A_h_correction_offd_data_new; hypre_CSRMatrixNumNonzeros(A_h_correction_offd) = num_nonzeros_offd_new; } else { hypre_printf("Error!! 
Block ordering for non-Galerkin coarse grid is not currently supported\n"); exit(-1); } } //hypre_MGRParCSRMatrixTruncate(A_h_correction, max_elmts); //wall_time = time_getWallclockSeconds() - wall_time; //hypre_printf("Filter A_h_correction time: %1.5f\n", wall_time); //hypre_ParCSRMatrixPrintIJ(A_h_correction,1,1,"A_h_correction_filtered"); // coarse grid / schur complement hypre_ParCSRMatrixAdd(1.0, A_cc, 1.0, A_h_correction, &A_h); *A_h_ptr = A_h; //hypre_ParCSRMatrixPrintIJ(A_h,1,1,"A_h"); hypre_ParCSRMatrixDestroy(A_cc); hypre_ParCSRMatrixDestroy(A_h_correction); hypre_TFree(c_marker, HYPRE_MEMORY_HOST); hypre_TFree(f_marker, HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_MGRComputeAlgebraicFixedStress(hypre_ParCSRMatrix *A, HYPRE_BigInt *mgr_idx_array, HYPRE_Solver A_ff_solver) { HYPRE_Int *U_marker, *S_marker, *P_marker; HYPRE_Int n_fine, i; HYPRE_BigInt ibegin; hypre_ParCSRMatrix *A_up; hypre_ParCSRMatrix *A_uu; hypre_ParCSRMatrix *A_su; hypre_ParCSRMatrix *A_pu; hypre_ParVector *e1_vector; hypre_ParVector *e2_vector; hypre_ParVector *e3_vector; hypre_ParVector *e4_vector; hypre_ParVector *e5_vector; n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); ibegin = hypre_ParCSRMatrixFirstRowIndex(A); hypre_assert(ibegin == mgr_idx_array[0]); U_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); S_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { U_marker[i] = -1; S_marker[i] = -1; P_marker[i] = -1; } // create C and F markers for (i = 0; i < n_fine; i++) { if (i < mgr_idx_array[1] - ibegin) { U_marker[i] = 1; } else if (i >= (mgr_idx_array[1] - ibegin) && i < (mgr_idx_array[2] - ibegin)) { S_marker[i] = 1; } else { P_marker[i] = 1; } } // Get A_up hypre_MGRGetSubBlock(A, U_marker, P_marker, 0, &A_up); // GetA_uu hypre_MGRGetSubBlock(A, U_marker, U_marker, 0, &A_uu); // Get A_su hypre_MGRGetSubBlock(A, S_marker, U_marker, 
0, &A_su); // Get A_pu hypre_MGRGetSubBlock(A, P_marker, U_marker, 0, &A_pu); e1_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_up), hypre_ParCSRMatrixGlobalNumCols(A_up), hypre_ParCSRMatrixColStarts(A_up)); hypre_ParVectorInitialize(e1_vector); hypre_ParVectorSetConstantValues(e1_vector, 1.0); e2_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu), hypre_ParCSRMatrixGlobalNumRows(A_uu), hypre_ParCSRMatrixRowStarts(A_uu)); hypre_ParVectorInitialize(e2_vector); hypre_ParVectorSetConstantValues(e2_vector, 0.0); e3_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu), hypre_ParCSRMatrixGlobalNumRows(A_uu), hypre_ParCSRMatrixRowStarts(A_uu)); hypre_ParVectorInitialize(e3_vector); hypre_ParVectorSetConstantValues(e3_vector, 0.0); e4_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_su), hypre_ParCSRMatrixGlobalNumRows(A_su), hypre_ParCSRMatrixRowStarts(A_su)); hypre_ParVectorInitialize(e4_vector); hypre_ParVectorSetConstantValues(e4_vector, 0.0); e5_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_pu), hypre_ParCSRMatrixGlobalNumRows(A_pu), hypre_ParCSRMatrixRowStarts(A_pu)); hypre_ParVectorInitialize(e5_vector); hypre_ParVectorSetConstantValues(e5_vector, 0.0); // compute e2 = A_up * e1 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_up, e1_vector, 0.0, e2_vector, e2_vector); // solve e3 = A_uu^-1 * e2 hypre_BoomerAMGSolve(A_ff_solver, A_uu, e2_vector, e3_vector); // compute e4 = A_su * e3 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_su, e3_vector, 0.0, e4_vector, e4_vector); // compute e4 = A_su * e3 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_su, e3_vector, 0.0, e4_vector, e4_vector); // print e4 hypre_ParVectorPrintIJ(e4_vector, 1, "Dsp"); // compute e5 = A_pu * e3 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_pu, e3_vector, 0.0, e5_vector, e5_vector); hypre_ParVectorPrintIJ(e5_vector, 1, "Dpp"); hypre_ParVectorDestroy(e1_vector); hypre_ParVectorDestroy(e2_vector); hypre_ParVectorDestroy(e3_vector); hypre_ParCSRMatrixDestroy(A_uu); 
hypre_ParCSRMatrixDestroy(A_up); hypre_ParCSRMatrixDestroy(A_pu); hypre_ParCSRMatrixDestroy(A_su); hypre_TFree(U_marker, HYPRE_MEMORY_HOST); hypre_TFree(S_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_MGRApproximateInverse(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **A_inv) { HYPRE_Int print_level, mr_max_row_nnz, mr_max_iter, nsh_max_row_nnz, nsh_max_iter, mr_col_version; HYPRE_Real mr_tol, nsh_tol; HYPRE_Real *droptol = hypre_CTAlloc(HYPRE_Real, 2, HYPRE_MEMORY_HOST); hypre_ParCSRMatrix *approx_A_inv = NULL; print_level = 0; nsh_max_iter = 2; nsh_max_row_nnz = 2; // default 1000 mr_max_iter = 1; mr_tol = 1.0e-3; mr_max_row_nnz = 2; // default 800 mr_col_version = 0; nsh_tol = 1.0e-3; droptol[0] = 1.0e-2; droptol[1] = 1.0e-2; hypre_ILUParCSRInverseNSH(A, &approx_A_inv, droptol, mr_tol, nsh_tol, DIVIDE_TOL, mr_max_row_nnz, nsh_max_row_nnz, mr_max_iter, nsh_max_iter, mr_col_version, print_level); *A_inv = approx_A_inv; if (droptol) { hypre_TFree(droptol, HYPRE_MEMORY_HOST); } return hypre_error_flag; } HYPRE_Int hypre_MGRBuildInterpApproximateInverseExp(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *S, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { HYPRE_Int *C_marker; HYPRE_Int *F_marker; hypre_ParCSRMatrix *A_fc; hypre_ParCSRMatrix *minus_Wp; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); HYPRE_Int *fine_to_coarse = NULL; HYPRE_Int coarse_counter; HYPRE_BigInt total_global_cpts; HYPRE_Int num_cols_P_offd; // 
HYPRE_BigInt my_first_cpt;
   HYPRE_Int i, jj;
   HYPRE_Real one = 1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   // HYPRE_Int num_threads;
   // HYPRE_Real wall_time; /* for debugging instrumentation */

   C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);

   // create C and F markers (C rows are +1 in C_marker, F rows are +1 in F_marker)
   for (i = 0; i < n_fine; i++)
   {
      C_marker[i] = (CF_marker[i] == 1) ? 1 : -1;
      F_marker[i] = (CF_marker[i] == 1) ? -1 : 1;
   }

   // Get A_FC
   hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc);

   // compute -Wp = S * A_fc (S is the caller-supplied approximate A_ff inverse)
   minus_Wp = hypre_ParMatmul(S, A_fc);

   hypre_CSRMatrix *minus_Wp_diag = hypre_ParCSRMatrixDiag(minus_Wp);
   HYPRE_Real *minus_Wp_diag_data = hypre_CSRMatrixData(minus_Wp_diag);
   HYPRE_Int *minus_Wp_diag_i = hypre_CSRMatrixI(minus_Wp_diag);
   HYPRE_Int *minus_Wp_diag_j = hypre_CSRMatrixJ(minus_Wp_diag);
   hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp);
   HYPRE_Real *minus_Wp_offd_data = hypre_CSRMatrixData(minus_Wp_offd);
   HYPRE_Int *minus_Wp_offd_i = hypre_CSRMatrixI(minus_Wp_offd);
   HYPRE_Int *minus_Wp_offd_j = hypre_CSRMatrixJ(minus_Wp_offd);

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   // num_threads = hypre_NumThreads();

   // my_first_cpt = num_cpts_global[0];
   /* last rank knows the global number of coarse points; broadcast it */
   if (my_id == (num_procs - 1))
   {
      total_global_cpts = num_cpts_global[1];
   }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; }

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/
   HYPRE_Int row_counter = 0;
   coarse_counter = 0;
   for (i = 0; i < n_fine; i++)
   {
      /*--------------------------------------------------------------------
       * If i is a C-point, interpolation is the identity. Also set up
       * mapping vector.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] > 0)
      {
         jj_counter++;
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }
      else
      {
         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is the approximation of
          * A_{ff}^{-1}A_{fc}: count the nonzeros of the -Wp row.
          *--------------------------------------------------------------------*/
         for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter + 1]; jj++)
         {
            jj_counter++;
         }
         if (num_procs > 1)
         {
            for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter + 1]; jj++)
            {
               jj_counter_offd++;
            }
         }
         row_counter++;
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   P_diag_size = jj_counter;
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_i[n_fine] = jj_counter;
   P_offd_size = jj_counter_offd;
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);

   /*-----------------------------------------------------------------------
    * Intialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Second pass: fill in P row by row.
    * NOTE(review): this pass tests CF_marker[i] >= 0 while the counting
    * pass above tests CF_marker[i] > 0; the two agree only if CF_marker
    * never takes the value 0 — presumably it is +/-1 here, confirm.
    *-----------------------------------------------------------------------*/
   row_counter = 0;
   for (i = 0; i < n_fine; i++)
   {
      /*--------------------------------------------------------------------
       * If i is a c-point, interpolation is the identity.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         P_diag_i[i] = jj_counter;
         P_diag_j[jj_counter] = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }
      /*--------------------------------------------------------------------
       * If i is an F-point, build interpolation.
       *--------------------------------------------------------------------*/
      else
      {
         /* Diagonal part of P */
         P_diag_i[i] = jj_counter;
         for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter + 1]; jj++)
         {
            P_diag_j[jj_counter] = minus_Wp_diag_j[jj];
            P_diag_data[jj_counter] = - minus_Wp_diag_data[jj];
            jj_counter++;
         }
         /* Off-Diagonal part of P */
         P_offd_i[i] = jj_counter_offd;
         if (num_procs > 1)
         {
            for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter + 1]; jj++)
            {
               P_offd_j[jj_counter_offd] = minus_Wp_offd_j[jj];
               P_offd_data[jj_counter_offd] = - minus_Wp_offd_data[jj];
               jj_counter_offd++;
            }
         }
         row_counter++;
      }
      P_offd_i[i + 1] = jj_counter_offd;
   }

   P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0,
                                P_diag_i[n_fine], P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;

   /* copy the off-process column map from -Wp (P's offd pattern is -Wp's) */
   num_cols_P_offd = hypre_CSRMatrixNumCols(minus_Wp_offd);
   HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(minus_Wp);
   /* NOTE(review): col_map_offd_P stays uninitialized when P_offd_size == 0
    * but num_cols_P_offd > 0 — verify that combination cannot occur. */
   if (P_offd_size)
   {
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      for (i = 0; i < num_cols_P_offd; i++)
      {
         col_map_offd_P[i] = col_map_offd_tmp[i];
      }
   }
   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   hypre_MatvecCommPkgCreate(P);

   *P_ptr = P;

   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(C_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(F_marker, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixDestroy(A_fc);
   hypre_ParCSRMatrixDestroy(minus_Wp);

   return 0;
}

/*--------------------------------------------------------------------------
 * hypre_MGRBuildInterpApproximateInverse
 *
 * Builds P = [ W ; I ] with W = -(approx A_ff^{-1}) * A_fc, where the
 * approximate inverse comes from hypre_MGRApproximateInverse (NSH).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_MGRBuildInterpApproximateInverse(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
                                       HYPRE_BigInt *num_cpts_global, HYPRE_Int
debug_flag, hypre_ParCSRMatrix **P_ptr)
{
   HYPRE_Int *C_marker;
   HYPRE_Int *F_marker;
   hypre_ParCSRMatrix *A_ff;
   hypre_ParCSRMatrix *A_fc;
   hypre_ParCSRMatrix *A_ff_inv;
   hypre_ParCSRMatrix *W;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Int *P_offd_i;
   HYPRE_Int P_diag_nnz;
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int num_cols_P_offd;
   HYPRE_Int i;
   HYPRE_Real m_one = -1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);

   C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);

   // create C and F markers
   for (i = 0; i < n_fine; i++)
   {
      C_marker[i] = (CF_marker[i] == 1) ? 1 : -1;
      F_marker[i] = (CF_marker[i] == 1) ? -1 : 1;
   }

   // Get A_FF
   hypre_MGRGetSubBlock(A, F_marker, F_marker, 0, &A_ff);
   // hypre_ParCSRMatrixPrintIJ(A_ff, 1, 1, "A_ff");
   // Get A_FC
   hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc);

   /* sparse approximate inverse of A_ff, then W = -(A_ff_inv * A_fc) */
   hypre_MGRApproximateInverse(A_ff, &A_ff_inv);
   // hypre_ParCSRMatrixPrintIJ(A_ff_inv, 1, 1, "A_ff_inv");
   // hypre_ParCSRMatrixPrintIJ(A_fc, 1, 1, "A_fc");
   W = hypre_ParMatmul(A_ff_inv, A_fc);
   hypre_ParCSRMatrixScale(W, m_one);
   // hypre_ParCSRMatrixPrintIJ(W, 1, 1, "Wp");

   hypre_CSRMatrix *W_diag = hypre_ParCSRMatrixDiag(W);
   hypre_CSRMatrix *W_offd = hypre_ParCSRMatrixOffd(W);

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* last rank knows the global number of coarse points; broadcast it */
   if (my_id == (num_procs - 1))
   {
      total_global_cpts = num_cpts_global[1];
   }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);

   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   P_diag_nnz = hypre_CSRMatrixNumNonzeros(W_diag) + hypre_CSRMatrixNumCols(W_diag);
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_nnz, memory_location_P);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_nnz, memory_location_P);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);

   /* Extend W data to P data: interleave identity rows at C-points */
   hypre_ExtendWtoPHost( n_fine, CF_marker, hypre_CSRMatrixI(W_diag), hypre_CSRMatrixJ(W_diag),
                         hypre_CSRMatrixData(W_diag), P_diag_i, P_diag_j, P_diag_data,
                         hypre_CSRMatrixI(W_offd), P_offd_i );

   // final P
   P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A), num_cpts_global,
                                hypre_CSRMatrixNumCols(W_offd), P_diag_nnz,
                                hypre_CSRMatrixNumNonzeros(W_offd) );
   hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(P)) = memory_location_P;
   hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(P)) = memory_location_P;
   hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(P)) = P_diag_i;
   hypre_CSRMatrixJ(hypre_ParCSRMatrixDiag(P)) = P_diag_j;
   hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(P)) = P_diag_data;
   hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(P)) = P_offd_i;
   /* P's offd j/data arrays are stolen from W (ownership transferred;
    * W's pointers are nulled so destroying W does not free them) */
   hypre_CSRMatrixJ(hypre_ParCSRMatrixOffd(P)) = hypre_CSRMatrixJ(W_offd);
   hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(P)) = hypre_CSRMatrixData(W_offd);
   hypre_CSRMatrixJ(W_offd) = NULL;
   hypre_CSRMatrixData(W_offd) = NULL;

   num_cols_P_offd = hypre_CSRMatrixNumCols(W_offd);
   HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(W);
   if (hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(P)))
   {
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      for (i = 0; i < num_cols_P_offd; i++)
      {
         col_map_offd_P[i] = col_map_offd_tmp[i];
      }
   }
   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(P)) = num_cols_P_offd;
   }
   hypre_MatvecCommPkgCreate(P);

   *P_ptr = P;

   hypre_TFree(C_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(F_marker, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixDestroy(A_ff);
   hypre_ParCSRMatrixDestroy(A_fc);
   hypre_ParCSRMatrixDestroy(A_ff_inv);
   hypre_ParCSRMatrixDestroy(W);

   return 0;
}

/* Setup interpolation operator.
 * Dispatches on interp_type: <3 -> MGRBuildPHost/PDevice, 4 -> approximate
 * inverse interpolation (host only), 5/6/7 -> BoomerAMG ModExt(PI/PE)
 * variants, otherwise classical modified interpolation. */
HYPRE_Int
hypre_MGRBuildInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S,
                     HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions,
                     HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor,
                     HYPRE_Int max_elmts, hypre_ParCSRMatrix **P, HYPRE_Int interp_type,
                     HYPRE_Int numsweeps)
{
   // HYPRE_Int i;
   hypre_ParCSRMatrix *P_ptr = NULL;
   //HYPRE_Real jac_trunc_threshold = trunc_factor;
   //HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold;
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   /* Interpolation for each level */
   if (interp_type < 3)
   {
      if (exec == HYPRE_EXEC_HOST)
      {
         // hypre_MGRBuildP(A, CF_marker, num_cpts_global, interp_type, debug_flag, &P_ptr);
         hypre_MGRBuildPHost(A, CF_marker, num_cpts_global, interp_type, &P_ptr);
         //hypre_ParCSRMatrixPrintIJ(P_ptr, 0, 0, "P_host");
      }
#if defined(HYPRE_USING_CUDA)
      else
      {
         hypre_MGRBuildPDevice(A, CF_marker, num_cpts_global, interp_type, &P_ptr);
         //hypre_ParCSRMatrixPrintIJ(P_ptr, 0, 0, "P_device");
      }
#endif
      /* Could do a few sweeps of Jacobi to further improve Jacobi interpolation P */
      /*
      if(interp_type == 2)
      {
         for(i=0; i<numsweeps; i++)
         {
            hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker, 0,
                                        jac_trunc_threshold, jac_trunc_threshold_minus );
         }
         hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts);
      }
      */
   }
   else if (interp_type == 4)
   {
      if (exec == HYPRE_EXEC_HOST)
      {
         hypre_MGRBuildInterpApproximateInverse(A, CF_marker, num_cpts_global, debug_flag, &P_ptr);
         hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts);
      }
#if defined(HYPRE_USING_CUDA)
      else
      {
         hypre_NoGPUSupport("interpolation");
      }
#endif
   }
   /* else if (interp_type
== 99) { if (exec == HYPRE_EXEC_HOST) { hypre_MGRBuildInterpApproximateInverseExp(A, S, CF_marker, num_cpts_global, debug_flag, &P_ptr); hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts); } #if defined(HYPRE_USING_CUDA) else { hypre_NoGPUSupport("interpolation"); } #endif } */ else if (interp_type == 5) { hypre_BoomerAMGBuildModExtInterp(A, CF_marker, S, num_cpts_global, 1, NULL, debug_flag, trunc_factor, max_elmts, &P_ptr); } else if (interp_type == 6) { hypre_BoomerAMGBuildModExtPIInterp(A, CF_marker, S, num_cpts_global, 1, NULL, debug_flag, trunc_factor, max_elmts, &P_ptr); } else if (interp_type == 7) { hypre_BoomerAMGBuildModExtPEInterp(A, CF_marker, S, num_cpts_global, 1, NULL, debug_flag, trunc_factor, max_elmts, &P_ptr); } else { /* Classical modified interpolation */ hypre_BoomerAMGBuildInterp(A, CF_marker, S, num_cpts_global, 1, NULL, debug_flag, trunc_factor, max_elmts, &P_ptr); } /* set pointer to P */ *P = P_ptr; return hypre_error_flag; } /* Setup restriction operator */ HYPRE_Int hypre_MGRBuildRestrict(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Real strong_threshold, HYPRE_Real max_row_sum, hypre_ParCSRMatrix **R, HYPRE_Int restrict_type, HYPRE_Int numsweeps) { // HYPRE_Int i; hypre_ParCSRMatrix *R_ptr = NULL; hypre_ParCSRMatrix *AT = NULL; hypre_ParCSRMatrix *ST = NULL; // HYPRE_Real jac_trunc_threshold = trunc_factor; // HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold; HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ); /* Build AT (transpose A) */ if (restrict_type > 0) { hypre_ParCSRMatrixTranspose(A, &AT, 1); } /* Restriction for each level */ if (restrict_type == 0) { if (exec == HYPRE_EXEC_HOST) { hypre_MGRBuildP(A, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr); //hypre_ParCSRMatrixPrintIJ(R_ptr, 0, 0, 
"R_host"); } #if defined(HYPRE_USING_CUDA) else { hypre_MGRBuildPDevice(A, CF_marker, num_cpts_global, restrict_type, &R_ptr); //hypre_ParCSRMatrixPrintIJ(R_ptr, 0, 0, "R_device"); } #endif } else if (restrict_type == 1 || restrict_type == 2) { if (exec == HYPRE_EXEC_HOST) { hypre_MGRBuildP(AT, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr); //hypre_ParCSRMatrixPrintIJ(R_ptr, 0, 0, "R_host"); } #if defined(HYPRE_USING_CUDA) else { hypre_MGRBuildPDevice(AT, CF_marker, num_cpts_global, restrict_type, &R_ptr); //hypre_ParCSRMatrixPrintIJ(R_ptr, 0, 0, "R_device"); } #endif } else if (restrict_type == 3) { /* move diagonal to first entry */ hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(AT)); hypre_MGRBuildInterpApproximateInverse(AT, CF_marker, num_cpts_global, debug_flag, &R_ptr); hypre_BoomerAMGInterpTruncation(R_ptr, trunc_factor, max_elmts); } else { /* Build new strength matrix */ hypre_BoomerAMGCreateS(AT, strong_threshold, max_row_sum, 1, NULL, &ST); /* Classical modified interpolation */ hypre_BoomerAMGBuildInterp(AT, CF_marker, ST, num_cpts_global, 1, NULL, debug_flag, trunc_factor, max_elmts, &R_ptr); } /* set pointer to P */ *R = R_ptr; /* Free memory */ if (restrict_type > 0) { hypre_ParCSRMatrixDestroy(AT); } if (restrict_type > 5) { hypre_ParCSRMatrixDestroy(ST); } return hypre_error_flag; } void hypre_blas_smat_inv_n4 (HYPRE_Real *a) { const HYPRE_Real a11 = a[0], a12 = a[1], a13 = a[2], a14 = a[3]; const HYPRE_Real a21 = a[4], a22 = a[5], a23 = a[6], a24 = a[7]; const HYPRE_Real a31 = a[8], a32 = a[9], a33 = a[10], a34 = a[11]; const HYPRE_Real a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15]; const HYPRE_Real M11 = a22 * a33 * a44 + a23 * a34 * a42 + a24 * a32 * a43 - a22 * a34 * a43 - a23 * a32 * a44 - a24 * a33 * a42; const HYPRE_Real M12 = a12 * a34 * a43 + a13 * a32 * a44 + a14 * a33 * a42 - a12 * a33 * a44 - a13 * a34 * a42 - a14 * a32 * a43; const HYPRE_Real M13 = a12 * a23 * a44 + a13 * a24 * a42 + a14 * a22 * a43 - a12 * a24 * 
a43 - a13 * a22 * a44 - a14 * a23 * a42;
   const HYPRE_Real M14 = a12 * a24 * a33 + a13 * a22 * a34 + a14 * a23 * a32 -
                          a12 * a23 * a34 - a13 * a24 * a32 - a14 * a22 * a33;
   const HYPRE_Real M21 = a21 * a34 * a43 + a23 * a31 * a44 + a24 * a33 * a41 -
                          a21 * a33 * a44 - a23 * a34 * a41 - a24 * a31 * a43;
   const HYPRE_Real M22 = a11 * a33 * a44 + a13 * a34 * a41 + a14 * a31 * a43 -
                          a11 * a34 * a43 - a13 * a31 * a44 - a14 * a33 * a41;
   const HYPRE_Real M23 = a11 * a24 * a43 + a13 * a21 * a44 + a14 * a23 * a41 -
                          a11 * a23 * a44 - a13 * a24 * a41 - a14 * a21 * a43;
   const HYPRE_Real M24 = a11 * a23 * a34 + a13 * a24 * a31 + a14 * a21 * a33 -
                          a11 * a24 * a33 - a13 * a21 * a34 - a14 * a23 * a31;
   const HYPRE_Real M31 = a21 * a32 * a44 + a22 * a34 * a41 + a24 * a31 * a42 -
                          a21 * a34 * a42 - a22 * a31 * a44 - a24 * a32 * a41;
   const HYPRE_Real M32 = a11 * a34 * a42 + a12 * a31 * a44 + a14 * a32 * a41 -
                          a11 * a32 * a44 - a12 * a34 * a41 - a14 * a31 * a42;
   const HYPRE_Real M33 = a11 * a22 * a44 + a12 * a24 * a41 + a14 * a21 * a42 -
                          a11 * a24 * a42 - a12 * a21 * a44 - a14 * a22 * a41;
   const HYPRE_Real M34 = a11 * a24 * a32 + a12 * a21 * a34 + a14 * a22 * a31 -
                          a11 * a22 * a34 - a12 * a24 * a31 - a14 * a21 * a32;
   const HYPRE_Real M41 = a21 * a33 * a42 + a22 * a31 * a43 + a23 * a32 * a41 -
                          a21 * a32 * a43 - a22 * a33 * a41 - a23 * a31 * a42;
   const HYPRE_Real M42 = a11 * a32 * a43 + a12 * a33 * a41 + a13 * a31 * a42 -
                          a11 * a33 * a42 - a12 * a31 * a43 - a13 * a32 * a41;
   const HYPRE_Real M43 = a11 * a23 * a42 + a12 * a21 * a43 + a13 * a22 * a41 -
                          a11 * a22 * a43 - a12 * a23 * a41 - a13 * a21 * a42;
   const HYPRE_Real M44 = a11 * a22 * a33 + a12 * a23 * a31 + a13 * a21 * a32 -
                          a11 * a23 * a32 - a12 * a21 * a33 - a13 * a22 * a31;

   /* determinant by cofactor expansion along the first row of the adjugate */
   const HYPRE_Real det = a11 * M11 + a12 * M21 + a13 * M31 + a14 * M41;
   HYPRE_Real det_inv;

   /* NOTE(review): singularity guard is disabled; det == 0 divides by zero. */
   //if ( fabs(det) < 1e-22 ) {
   //hypre_printf("### WARNING: Matrix is nearly singular! det = %e\n", det);
   /*
   printf("##----------------------------------------------\n");
   printf("## %12.5e %12.5e %12.5e \n", a0, a1, a2);
   printf("## %12.5e %12.5e %12.5e \n", a3, a4, a5);
   printf("## %12.5e %12.5e %12.5e \n", a5, a6, a7);
   printf("##----------------------------------------------\n");
   getchar();
   */
   //}

   det_inv = 1.0 / det;

   /* write the inverse back over the input, row-major */
   a[0] = M11 * det_inv;  a[1] = M12 * det_inv;  a[2] = M13 * det_inv;  a[3] = M14 * det_inv;
   a[4] = M21 * det_inv;  a[5] = M22 * det_inv;  a[6] = M23 * det_inv;  a[7] = M24 * det_inv;
   a[8] = M31 * det_inv;  a[9] = M32 * det_inv;  a[10] = M33 * det_inv; a[11] = M34 * det_inv;
   a[12] = M41 * det_inv; a[13] = M42 * det_inv; a[14] = M43 * det_inv; a[15] = M44 * det_inv;
}

/* In-place inverse of a dense n x n matrix stored row-major in a.
 * n == 4 is special-cased to the closed form above; otherwise Gauss-Jordan
 * elimination without pivoting is used (the commented-out guard shows the
 * zero-diagonal check is disabled, so a zero pivot divides by zero). */
void hypre_blas_mat_inv(HYPRE_Real *a, HYPRE_Int n)
{
   HYPRE_Int i, j, k, l, u, kn, in;
   HYPRE_Real alinv;
   if (n == 4)
   {
      hypre_blas_smat_inv_n4(a);
   }
   else
   {
      for (k = 0; k < n; ++k)
      {
         kn = k * n;
         l = kn + k;
         //if (fabs(a[l]) < SMALLREAL) {
         //   printf("### WARNING: Diagonal entry is close to zero!");
         //   printf("### WARNING: diag_%d=%e\n", k, a[l]);
         //   a[l] = SMALLREAL;
         //}
         alinv = 1.0 / a[l];
         a[l] = alinv;
         /* scale pivot row (skipping the pivot column itself) */
         for (j = 0; j < k; ++j)
         {
            u = kn + j;
            a[u] *= alinv;
         }
         for (j = k + 1; j < n; ++j)
         {
            u = kn + j;
            a[u] *= alinv;
         }
         /* eliminate pivot column from all other rows */
         for (i = 0; i < k; ++i)
         {
            in = i * n;
            for (j = 0; j < n; ++j)
               if (j != k)
               {
                  u = in + j;
                  a[u] -= a[in + k] * a[kn + j];
               } // end if (j!=k)
         }
         for (i = k + 1; i < n; ++i)
         {
            in = i * n;
            for (j = 0; j < n; ++j)
               if (j != k)
               {
                  u = in + j;
                  a[u] -= a[in + k] * a[kn + j];
               } // end if (j!=k)
         }
         /* update pivot column */
         for (i = 0; i < k; ++i)
         {
            u = i * n + k;
            a[u] *= -alinv;
         }
         for (i = k + 1; i < n; ++i)
         {
            u = i * n + k;
            a[u] *= -alinv;
         }
      } // end for (k=0; k<n; ++k)
   }// end if
}

/* Builds B = block-diagonal approximate inverse of A's diagonal part:
 * the local rows are grouped into n_block dense blk_size x blk_size blocks,
 * each block is inverted densely, and the inverses are assembled into the
 * ParCSR matrix *B_ptr. */
HYPRE_Int
hypre_block_jacobi_scaling(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **B_ptr,
                           void *mgr_vdata, HYPRE_Int debug_flag)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int blk_size = (mgr_data -> block_size);
   HYPRE_Int reserved_coarse_size
= (mgr_data -> reserved_coarse_size);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_ParCSRMatrix *B;
   hypre_CSRMatrix *B_diag;
   HYPRE_Real *B_diag_data;
   HYPRE_Int *B_diag_i;
   HYPRE_Int *B_diag_j;
   hypre_CSRMatrix *B_offd;
   HYPRE_Int i, ii;
   HYPRE_Int j, jj;
   HYPRE_Int k;
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int n_block, left_size, inv_size;
   // HYPRE_Real wall_time; /* for debugging instrumentation */
   HYPRE_Int bidx, bidxm1, bidxp1;
   HYPRE_Real *diaginv;
   const HYPRE_Int nb2 = blk_size * blk_size;
   HYPRE_Int block_scaling_error = 0;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   // HYPRE_Int num_threads = hypre_NumThreads();
   //printf("n = %d\n",n);

   /* NOTE(review): my_id ranges over 0..num_procs-1, so this branch can
    * never execute; presumably (num_procs - 1) was intended so that only
    * the last rank excludes the reserved coarse rows — confirm upstream
    * before changing (same pattern appears in hypre_blockRelax_setup). */
   if (my_id == num_procs)
   {
      n_block = (n - reserved_coarse_size) / blk_size;
      left_size = n - blk_size * n_block;
   }
   else
   {
      n_block = n / blk_size;
      left_size = n - blk_size * n_block;
   }

   inv_size = nb2 * n_block + left_size * left_size;
   //printf("inv_size = %d\n",inv_size);
   hypre_blockRelax_setup(A, blk_size, reserved_coarse_size, &(mgr_data -> diaginv));

   // if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of B and fill in
    * NOTE(review): rows beyond n_block*blk_size (the left_size remainder)
    * get row pointers only from CTAlloc zeroing plus B_diag_i[n] = inv_size;
    * verify left_size == 0 in supported configurations.
    *-----------------------------------------------------------------------*/
   B_diag_i = hypre_CTAlloc(HYPRE_Int, n + 1, HYPRE_MEMORY_HOST);
   B_diag_j = hypre_CTAlloc(HYPRE_Int, inv_size, HYPRE_MEMORY_HOST);
   B_diag_data = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
   B_diag_i[n] = inv_size;

   //B_offd_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
   //B_offd_j = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
   //B_offd_data = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST);
   //B_offd_i[n] = 1;

   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   diaginv = hypre_CTAlloc(HYPRE_Real, nb2, HYPRE_MEMORY_HOST);
   //printf("n_block = %d\n",n_block);
   for (i = 0; i < n_block; i++)
   {
      bidxm1 = i * blk_size;
      bidxp1 = (i + 1) * blk_size;

      /* gather dense blk_size x blk_size diagonal block i into diaginv */
      for (k = 0; k < blk_size; k++)
      {
         for (j = 0; j < blk_size; j++)
         {
            bidx = k * blk_size + j;
            diaginv[bidx] = 0.0;
         }
         for (ii = A_diag_i[bidxm1 + k]; ii < A_diag_i[bidxm1 + k + 1]; ii++)
         {
            jj = A_diag_j[ii];
            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = k * blk_size + jj - bidxm1;
               //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx);
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }

      /* invert the dense block in place */
      hypre_blas_mat_inv(diaginv, blk_size);

      /* scatter the inverse into B's CSR arrays */
      for (k = 0; k < blk_size; k++)
      {
         B_diag_i[i * blk_size + k] = i * nb2 + k * blk_size;
         //B_offd_i[i*nb2+k] = 0;
         for (j = 0; j < blk_size; j++)
         {
            bidx = i * nb2 + k * blk_size + j;
            B_diag_j[bidx] = i * blk_size + j;
            B_diag_data[bidx] = diaginv[k * blk_size + j];
         }
      }
   }

   //printf("Before create\n");
   B = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A),
                                hypre_ParCSRMatrixGlobalNumCols(A),
                                hypre_ParCSRMatrixRowStarts(A),
                                hypre_ParCSRMatrixColStarts(A), 0, inv_size, 0);
   //printf("After create\n");
   B_diag = hypre_ParCSRMatrixDiag(B);
   hypre_CSRMatrixData(B_diag) = B_diag_data;
   hypre_CSRMatrixI(B_diag) = B_diag_i;
   hypre_CSRMatrixJ(B_diag) = B_diag_j;
   B_offd = hypre_ParCSRMatrixOffd(B);
   hypre_CSRMatrixData(B_offd) = NULL;
   hypre_CSRMatrixI(B_offd) = NULL;
   hypre_CSRMatrixJ(B_offd) = NULL;

   *B_ptr = B;

   return (block_scaling_error);
}

/* One sweep of block relaxation: for each dense diagonal block, form the
 * block residual and apply the precomputed block inverse (diaginv).
 * method 0 = block Jacobi (uses old values from Vtemp), 1 = block
 * Gauss-Seidel (uses updated u), anything else = Jacobi; the off-process
 * part is always handled Jacobi-style via Vext.
 * NOTE(review): blk_size is declared HYPRE_Real but used as an integer
 * count/loop bound throughout — presumably HYPRE_Int was intended. */
HYPRE_Int
hypre_blockRelax_solve (hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u,
                        HYPRE_Real blk_size, HYPRE_Int n_block, HYPRE_Int left_size,
                        HYPRE_Int method, HYPRE_Real *diaginv, hypre_ParVector *Vtemp)
{
   MPI_Comm comm =
hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Real *u_data = hypre_VectorData(u_local);
   hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
   HYPRE_Real *f_data = hypre_VectorData(f_local);
   hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
   HYPRE_Real *Vext_data = NULL;
   HYPRE_Real *v_buf_data;
   HYPRE_Int i, j, k;
   HYPRE_Int ii, jj;
   HYPRE_Int bidx, bidx1;
   HYPRE_Int relax_error = 0;
   HYPRE_Int num_sends;
   HYPRE_Int index, start;
   HYPRE_Int num_procs, my_id;
   HYPRE_Real *res;
   const HYPRE_Int nb2 = blk_size * blk_size;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   // HYPRE_Int num_threads = hypre_NumThreads();

   res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   /* start the halo exchange: pack the send buffer with the owned u values
    * and post the nonblocking exchange into Vext_data */
   if (num_procs > 1)
   {
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      v_buf_data = hypre_CTAlloc(HYPRE_Real,
                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                 HYPRE_MEMORY_HOST);
      Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
      if (num_cols_offd)
      {
         A_offd_j = hypre_CSRMatrixJ(A_offd);
         A_offd_data = hypre_CSRMatrixData(A_offd);
      }
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data);
   }

   /*-----------------------------------------------------------------
    * Copy current approximation into temporary vector.
    *-----------------------------------------------------------------*/
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n; i++)
   {
      Vtemp_data[i] = u_data[i];
      //printf("u_old[%d] = %e\n",i,Vtemp_data[i]);
   }

   /* wait for the halo exchange to finish before reading Vext_data */
   if (num_procs > 1)
   {
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }

   /*-----------------------------------------------------------------
    * Relax points block by block
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      /* block residual: res = f - A*u restricted to block i's rows */
      for (j = 0; j < blk_size; j++)
      {
         bidx = i * blk_size + j;
         res[j] = f_data[bidx];
         for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx + 1]; jj++)
         {
            ii = A_diag_j[jj];
            if (method == 0)
            {
               // Jacobi for diagonal part
               res[j] -= A_diag_data[jj] * Vtemp_data[ii];
            }
            else if (method == 1)
            {
               // Gauss-Seidel for diagonal part
               res[j] -= A_diag_data[jj] * u_data[ii];
            }
            else
            {
               // Default do Jacobi for diagonal part
               res[j] -= A_diag_data[jj] * Vtemp_data[ii];
            }
            //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]);
         }
         for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx + 1]; jj++)
         {
            // always do Jacobi for off-diagonal part
            ii = A_offd_j[jj];
            res[j] -= A_offd_data[jj] * Vext_data[ii];
         }
         //printf("%d: res = %e\n",bidx,res[j]);
      }

      /* u += diaginv_block(i) * res */
      for (j = 0; j < blk_size; j++)
      {
         bidx1 = i * blk_size + j;
         for (k = 0; k < blk_size; k++)
         {
            bidx = i * nb2 + j * blk_size + k;
            u_data[bidx1] += res[k] * diaginv[bidx];
            //printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]);
         }
         //printf("u[%d] = %e\n",bidx1,u_data[bidx1]);
      }
   }

   if (num_procs > 1)
   {
      hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(res, HYPRE_MEMORY_HOST);

   return (relax_error);
}

/*--------------------------------------------------------------------------
 * hypre_block_gs
 *
 * One block Gauss-Seidel relaxation sweep: u <- u + Dinv_b * (f - A u),
 * applied block row by block row, where Dinv_b are the pre-computed
 * blk_size x blk_size diagonal-block inverses passed in diaginv.
 *
 * The on-process (diagonal) part of the residual reads the continuously
 * updated u_data (Gauss-Seidel ordering); the off-process part reads ghost
 * values (Vext_data) exchanged once at the start of the sweep.
 *
 * Parameters:
 *   A         - parallel CSR matrix
 *   f         - right-hand side
 *   u         - approximation, relaxed in place
 *   blk_size  - rows per diagonal sub-block
 *               NOTE(review): declared HYPRE_Real but used as an integer
 *               count everywhere below - confirm it should be HYPRE_Int.
 *   n_block   - number of full diagonal sub-blocks on this rank
 *   left_size - left-over rows (n - blk_size*n_block); accepted but not
 *               referenced in this routine
 *   diaginv   - packed diagonal-block inverses, nb2 entries per block
 *   Vtemp     - work vector; receives a copy of the incoming iterate
 *               (the copy is written but the relaxation reads u_data)
 *
 * Returns: relax_error (always 0 here).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_block_gs (hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u,
                HYPRE_Real blk_size, HYPRE_Int n_block, HYPRE_Int left_size,
                HYPRE_Real *diaginv, hypre_ParVector *Vtemp)
{
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix        *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real             *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int              *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int              *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix        *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int              *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Real             *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int              *A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_Int               n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int               num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector           *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Real             *u_data = hypre_VectorData(u_local);
   hypre_Vector           *f_local = hypre_ParVectorLocalVector(f);
   HYPRE_Real             *f_data = hypre_VectorData(f_local);
   hypre_Vector           *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Real             *Vtemp_data = hypre_VectorData(Vtemp_local);
   HYPRE_Real             *Vext_data = NULL;
   HYPRE_Real             *v_buf_data;
   HYPRE_Int               i, j, k;
   HYPRE_Int               ii, jj;
   HYPRE_Int               bidx, bidx1;
   HYPRE_Int               relax_error = 0;
   HYPRE_Int               num_sends;
   HYPRE_Int               index, start;
   HYPRE_Int               num_procs, my_id;
   HYPRE_Real             *res;   /* residual of the current block row */
   const HYPRE_Int         nb2 = blk_size * blk_size;  /* entries per block */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //HYPRE_Int num_threads = hypre_NumThreads();

   res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   if (num_procs > 1)
   {
      /* Post the exchange of boundary u values needed for the
         off-process part of the residual. */
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                 HYPRE_MEMORY_HOST);
      Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
      if (num_cols_offd)
      {
         A_offd_j = hypre_CSRMatrixJ(A_offd);
         A_offd_data = hypre_CSRMatrixData(A_offd);
      }
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data);
   }

   /*-----------------------------------------------------------------
    * Copy current approximation into temporary vector.
    *-----------------------------------------------------------------*/
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n; i++)
   {
      /* Vtemp keeps the incoming iterate; the relaxation below reads the
         continuously updated u_data, not Vtemp_data. */
      Vtemp_data[i] = u_data[i];
   }

   if (num_procs > 1)
   {
      /* Wait for the ghost values before Vext_data is read. */
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }

   /*-----------------------------------------------------------------
    * Relax points block by block
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      /* Residual of block row i: res = f - A*u (current u). */
      for (j = 0; j < blk_size; j++)
      {
         bidx = i * blk_size + j;
         res[j] = f_data[bidx];
         for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx + 1]; jj++)
         {
            ii = A_diag_j[jj];
            //res[j] -= A_diag_data[jj] * Vtemp_data[ii];  /* (Jacobi variant) */
            res[j] -= A_diag_data[jj] * u_data[ii];
         }
         for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx + 1]; jj++)
         {
            ii = A_offd_j[jj];
            res[j] -= A_offd_data[jj] * Vext_data[ii];
         }
      }
      /* u_block += diaginv_block * res (dense blk_size x blk_size apply) */
      for (j = 0; j < blk_size; j++)
      {
         bidx1 = i * blk_size + j;
         for (k = 0; k < blk_size; k++)
         {
            bidx = i * nb2 + j * blk_size + k;
            u_data[bidx1] += res[k] * diaginv[bidx];
         }
      }
   }

   if (num_procs > 1)
   {
      hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
      hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(res, HYPRE_MEMORY_HOST);

   return (relax_error);
}

/*Block smoother*/
/*--------------------------------------------------------------------------
 * hypre_blockRelax_setup
 *
 * Extract the blk_size x blk_size diagonal sub-blocks of A's local
 * diagonal part, invert them with hypre_blas_mat_inv, and return the
 * packed inverses through *diaginvptr (any previous array is freed and
 * the array reallocated).
 *
 * Layout: block i occupies entries [i*nb2, (i+1)*nb2), row-major; the
 * trailing left_size x left_size left-over block follows.
 *
 * Returns 1.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_blockRelax_setup(hypre_ParCSRMatrix *A,
                       HYPRE_Int blk_size,
                       HYPRE_Int reserved_coarse_size,
                       HYPRE_Real **diaginvptr)
{
   MPI_Comm         comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int        n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        i, j, k;
   HYPRE_Int        ii, jj;
   HYPRE_Int        bidx, bidxm1, bidxp1;
   HYPRE_Int        num_procs, my_id;
   const HYPRE_Int  nb2 = blk_size * blk_size;
   HYPRE_Int        n_block;
   HYPRE_Int        left_size, inv_size;
   HYPRE_Real      *diaginv = *diaginvptr;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //HYPRE_Int num_threads = hypre_NumThreads();

   /* NOTE(review): my_id ranges over 0..num_procs-1, so this branch can
      never be taken; presumably the last rank was intended
      (my_id == num_procs - 1) so reserved coarse dofs are excluded from
      the blocked part - confirm. */
   if (my_id == num_procs)
   {
      n_block = (n - reserved_coarse_size) / blk_size;
      left_size = n - blk_size * n_block;
   }
   else
   {
      n_block = n / blk_size;
      left_size = n - blk_size * n_block;
   }

   inv_size = nb2 * n_block + left_size * left_size;

   /* (Re)allocate the output array to the required size. */
   if (diaginv != NULL)
   {
      hypre_TFree(diaginv, HYPRE_MEMORY_HOST);
      diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
   }
   else
   {
      diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
   }

   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      bidxm1 = i * blk_size;        /* first row of block i */
      bidxp1 = (i + 1) * blk_size;  /* one past last row    */
      for (k = 0; k < blk_size; k++)
      {
         for (j = 0; j < blk_size; j++)
         {
            bidx = i * nb2 + k * blk_size + j;
            diaginv[bidx] = 0.0;
         }
         for (ii = A_diag_i[bidxm1 + k]; ii < A_diag_i[bidxm1 + k + 1];
              ii++)
         {
            jj = A_diag_j[ii];
            /* keep only entries that fall inside the diagonal block */
            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = i * nb2 + k * blk_size + jj - bidxm1;
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }
   }

   /* Left-over rows (n - n_block*blk_size) form one trailing block. */
   for (i = 0; i < left_size; i++)
   {
      bidxm1 = n_block * nb2 + i * blk_size;
      bidxp1 = n_block * nb2 + (i + 1) * blk_size;
      /* NOTE(review): the trailing block is left_size x left_size, yet a
         row stride of blk_size is used here - confirm behavior when
         left_size != blk_size. */
      for (j = 0; j < left_size; j++)
      {
         bidx = n_block * nb2 + i * blk_size + j;
         diaginv[bidx] = 0.0;
      }
      for (ii = A_diag_i[n_block * blk_size + i]; ii < A_diag_i[n_block * blk_size + i + 1]; ii++)
      {
         jj = A_diag_j[ii];
         /* NOTE(review): '>' excludes column n_block*blk_size itself,
            which is the first left-over column - '>=' may have been
            intended; confirm. */
         if (jj > n_block * blk_size)
         {
            bidx = n_block * nb2 + i * blk_size + jj - n_block * blk_size;
            diaginv[bidx] = A_diag_data[ii];
         }
      }
   }

   /*-----------------------------------------------------------------
    * compute the inverses of all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   if (blk_size > 1)
   {
      for (i = 0; i < n_block; i++)
      {
         hypre_blas_mat_inv(diaginv + i * nb2, blk_size);
      }
      /* NOTE(review): the left-over block was stored at offset n_block*nb2
         above but is inverted here at offset blk_size*nb2 - confirm. */
      hypre_blas_mat_inv(diaginv + (HYPRE_Int)(blk_size * nb2), left_size);
   }
   else
   {
      /* Scalar case: plain reciprocal of each diagonal entry. */
      for (i = 0; i < n; i++)
      {
         // FIX-ME: zero-diagonal should be tested previously
         if (fabs(diaginv[i]) < SMALLREAL)
         {
            diaginv[i] = 0.0;
         }
         else
         {
            diaginv[i] = 1.0 / diaginv[i];
         }
      }
   }

   *diaginvptr = diaginv;

   return 1;
}

/*--------------------------------------------------------------------------
 * hypre_blockRelax
 *
 * Convenience driver: build the inverted diagonal sub-blocks of A
 * (same extraction/inversion steps as hypre_blockRelax_setup), perform
 * one relaxation sweep via hypre_blockRelax_solve, then free the
 * temporary inverses.
 *
 * method is forwarded to hypre_blockRelax_solve.  Ztemp is accepted but
 * not referenced in this routine.
 *
 * Returns relax_error (always 0 here).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_blockRelax(hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u,
                 HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, HYPRE_Int method,
                 hypre_ParVector *Vtemp, hypre_ParVector *Ztemp)
{
   MPI_Comm         comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int        n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        i, j, k;
   HYPRE_Int        ii, jj;
   HYPRE_Int        bidx, bidxm1, bidxp1;
   HYPRE_Int        relax_error = 0;
   HYPRE_Int        num_procs, my_id;
   const HYPRE_Int  nb2 = blk_size * blk_size;
   HYPRE_Int        n_block;
   HYPRE_Int        left_size, inv_size;
   HYPRE_Real      *diaginv;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //HYPRE_Int num_threads = hypre_NumThreads();

   /* NOTE(review): same dead branch as in hypre_blockRelax_setup -
      my_id can never equal num_procs; confirm (num_procs - 1). */
   if (my_id == num_procs)
   {
      n_block = (n - reserved_coarse_size) / blk_size;
      left_size = n - blk_size * n_block;
   }
   else
   {
      n_block = n / blk_size;
      left_size = n - blk_size * n_block;
   }

   inv_size = nb2 * n_block + left_size * left_size;

   diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      bidxm1 = i * blk_size;
      bidxp1 = (i + 1) * blk_size;
      for (k = 0; k < blk_size; k++)
      {
         for (j = 0; j < blk_size; j++)
         {
            bidx = i * nb2 + k * blk_size + j;
            diaginv[bidx] = 0.0;
         }
         for (ii = A_diag_i[bidxm1 + k]; ii < A_diag_i[bidxm1 + k + 1]; ii++)
         {
            jj = A_diag_j[ii];
            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = i * nb2 + k * blk_size + jj - bidxm1;
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }
   }

   /* Left-over rows: same NOTE(review) points as in
      hypre_blockRelax_setup (row stride and '>' comparison). */
   for (i = 0; i < left_size; i++)
   {
      bidxm1 = n_block * nb2 + i * blk_size;
      bidxp1 = n_block * nb2 + (i + 1) * blk_size;
      for (j = 0; j < left_size; j++)
      {
         bidx = n_block * nb2 + i * blk_size + j;
         diaginv[bidx] = 0.0;
      }
      for (ii = A_diag_i[n_block * blk_size + i]; ii < A_diag_i[n_block * blk_size + i + 1]; ii++)
      {
         jj = A_diag_j[ii];
         if (jj > n_block * blk_size)
         {
            bidx = n_block * nb2 + i * blk_size + jj - n_block * blk_size;
            diaginv[bidx] = A_diag_data[ii];
         }
      }
   }

   /*-----------------------------------------------------------------
    * compute the inverses of all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   if (blk_size > 1)
   {
      for (i = 0; i < n_block; i++)
      {
         hypre_blas_mat_inv(diaginv + i * nb2, blk_size);
      }
      /* NOTE(review): offset blk_size*nb2 vs fill offset n_block*nb2 -
         confirm (see hypre_blockRelax_setup). */
      hypre_blas_mat_inv(diaginv + (HYPRE_Int)(blk_size * nb2), left_size);
   }
   else
   {
      for (i = 0; i < n; i++)
      {
         // FIX-ME: zero-diagonal should be tested previously
         if (fabs(diaginv[i]) < SMALLREAL)
         {
            diaginv[i] = 0.0;
         }
         else
         {
            diaginv[i] = 1.0 / diaginv[i];
         }
      }
   }

   hypre_blockRelax_solve(A, f, u, blk_size, n_block, left_size, method, diaginv, Vtemp);

   /*-----------------------------------------------------------------
    * Free temporary memory
    *-----------------------------------------------------------------*/
   hypre_TFree(diaginv, HYPRE_MEMORY_HOST);

   return (relax_error);
}

/* Set the fine grid (F-relaxation) solver.
 * The solver is installed for the first level only. */
HYPRE_Int
hypre_MGRSetFSolver( void *mgr_vdata,
                     HYPRE_Int (*fine_grid_solver_solve)(void*, void*, void*, void*),
                     HYPRE_Int (*fine_grid_solver_setup)(void*, void*, void*, void*),
                     void *fsolver )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   HYPRE_Solver **aff_solver = (mgr_data -> aff_solver);

   /* Lazily allocate one solver slot per coarse level. */
   if (aff_solver == NULL)
   {
      aff_solver = hypre_CTAlloc(HYPRE_Solver*, max_num_coarse_levels, HYPRE_MEMORY_HOST);
   }

   /* only allow to set F-solver for the first level */
   aff_solver[0] = (HYPRE_Solver *) fsolver;

   (mgr_data -> fine_grid_solver_solve) = fine_grid_solver_solve;
   (mgr_data -> fine_grid_solver_setup) = fine_grid_solver_setup;
   (mgr_data -> aff_solver) = aff_solver;
   (mgr_data -> fsolver_mode) = 0;

   return hypre_error_flag;
}

/* set coarse grid solver */
HYPRE_Int
hypre_MGRSetCoarseSolver( void *mgr_vdata,
                          HYPRE_Int (*coarse_grid_solver_solve)(void*, void*, void*, void*),
                          HYPRE_Int (*coarse_grid_solver_setup)(void*, void*, void*, void*),
                          void *coarse_grid_solver )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   (mgr_data -> coarse_grid_solver_solve) = coarse_grid_solver_solve;
   (mgr_data -> coarse_grid_solver_setup) = coarse_grid_solver_setup;
   (mgr_data -> coarse_grid_solver) = (HYPRE_Solver) coarse_grid_solver;
   /* A user-supplied coarse solver overrides the default one. */
   (mgr_data -> use_default_cgrid_solver) = 0;

   return hypre_error_flag;
}

/* Store a user-supplied (approximate) inverse of A_ff. */
HYPRE_Int
hypre_MGRSetAffInv( void *mgr_vdata, hypre_ParCSRMatrix *A_ff_inv )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   (mgr_data -> A_ff_inv) = A_ff_inv;

   return hypre_error_flag;
}

/* Set the maximum number of coarse levels.
 * maxcoarselevs = 1 yields the default 2-grid scheme. */
HYPRE_Int
hypre_MGRSetMaxCoarseLevels( void *mgr_vdata, HYPRE_Int maxcoarselevs )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   (mgr_data -> max_num_coarse_levels) = maxcoarselevs;

   return hypre_error_flag;
}

/* Set the system block size */
HYPRE_Int
hypre_MGRSetBlockSize( void *mgr_vdata, HYPRE_Int bsize )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   (mgr_data -> block_size) = bsize;

   return hypre_error_flag;
}

/* Set the relaxation type for the fine levels of the reduction.
 * Currently supports the following flavors of relaxation types
 * as described in the documentation:
 * relax_types 0 - 8, 13, 14, 18, 19, 98.
 * See par_relax.c and par_relax_more.c for more details.
* */ HYPRE_Int hypre_MGRSetRelaxType( void *mgr_vdata, HYPRE_Int relax_type ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> relax_type) = relax_type; return hypre_error_flag; } /* Set the number of relaxation sweeps */ HYPRE_Int hypre_MGRSetNumRelaxSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_relax_sweeps) = nsweeps; return hypre_error_flag; } /* Set the F-relaxation strategy: 0=single level, 1=multi level */ HYPRE_Int hypre_MGRSetFRelaxMethod( void *mgr_vdata, HYPRE_Int relax_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> Frelax_method) != NULL) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); for (i = 0; i < max_num_coarse_levels; i++) { Frelax_method[i] = relax_method; } (mgr_data -> Frelax_method) = Frelax_method; return hypre_error_flag; } /* Set the F-relaxation strategy: 0=single level, 1=multi level */ HYPRE_Int hypre_MGRSetLevelFRelaxMethod( void *mgr_vdata, HYPRE_Int *relax_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> Frelax_method) != NULL) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (relax_method != NULL) { for (i = 0; i < max_num_coarse_levels; i++) { Frelax_method[i] = relax_method[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { Frelax_method[i] = 0; } } (mgr_data -> Frelax_method) = Frelax_method; return hypre_error_flag; } /* Coarse grid method: 0=Galerkin RAP, 1=non-Galerkin with dropping*/ 
HYPRE_Int hypre_MGRSetCoarseGridMethod( void *mgr_vdata, HYPRE_Int *cg_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> use_non_galerkin_cg) != NULL) { hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST); (mgr_data -> use_non_galerkin_cg) = NULL; } HYPRE_Int *use_non_galerkin_cg = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (cg_method != NULL) { for (i = 0; i < max_num_coarse_levels; i++) { use_non_galerkin_cg[i] = cg_method[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { use_non_galerkin_cg[i] = 0; } } (mgr_data -> use_non_galerkin_cg) = use_non_galerkin_cg; return hypre_error_flag; } /* Set the F-relaxation number of functions for each level */ HYPRE_Int hypre_MGRSetLevelFRelaxNumFunctions( void *mgr_vdata, HYPRE_Int *num_functions ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> Frelax_num_functions) != NULL) { hypre_TFree(mgr_data -> Frelax_num_functions, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_num_functions) = NULL; } HYPRE_Int *Frelax_num_functions = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (num_functions != NULL) { for (i = 0; i < max_num_coarse_levels; i++) { Frelax_num_functions[i] = num_functions[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { Frelax_num_functions[i] = 1; } } (mgr_data -> Frelax_num_functions) = Frelax_num_functions; return hypre_error_flag; } /* Set the type of the restriction type * for computing restriction operator */ HYPRE_Int hypre_MGRSetLevelRestrictType( void *mgr_vdata, HYPRE_Int *restrict_type) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> restrict_type) != NULL) { hypre_TFree((mgr_data 
-> restrict_type), HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (restrict_type != NULL) { for (i = 0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = *(restrict_type + i); } } else { for (i = 0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = 0; } } (mgr_data -> restrict_type) = level_restrict_type; return hypre_error_flag; } /* Set the type of the restriction type * for computing restriction operator */ HYPRE_Int hypre_MGRSetRestrictType( void *mgr_vdata, HYPRE_Int restrict_type) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> restrict_type) != NULL) { hypre_TFree((mgr_data -> restrict_type), HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); for (i = 0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = restrict_type; } (mgr_data -> restrict_type) = level_restrict_type; return hypre_error_flag; } /* Set the number of Jacobi interpolation iterations * for computing interpolation operator */ HYPRE_Int hypre_MGRSetNumRestrictSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_restrict_sweeps) = nsweeps; return hypre_error_flag; } /* Set the type of the interpolation * for computing interpolation operator */ HYPRE_Int hypre_MGRSetInterpType( void *mgr_vdata, HYPRE_Int interpType) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> interp_type) != NULL) { hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, 
HYPRE_MEMORY_HOST); for (i = 0; i < max_num_coarse_levels; i++) { level_interp_type[i] = interpType; } (mgr_data -> interp_type) = level_interp_type; return hypre_error_flag; } /* Set the type of the interpolation * for computing interpolation operator */ HYPRE_Int hypre_MGRSetLevelInterpType( void *mgr_vdata, HYPRE_Int *interpType) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> interp_type) != NULL) { hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (interpType != NULL) { for (i = 0; i < max_num_coarse_levels; i++) { level_interp_type[i] = *(interpType + i); } } else { for (i = 0; i < max_num_coarse_levels; i++) { level_interp_type[i] = 2; } } (mgr_data -> interp_type) = level_interp_type; return hypre_error_flag; } /* Set the number of Jacobi interpolation iterations * for computing interpolation operator */ HYPRE_Int hypre_MGRSetNumInterpSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_interp_sweeps) = nsweeps; return hypre_error_flag; } /* Set the threshold to truncate the coarse grid at each * level of reduction */ HYPRE_Int hypre_MGRSetTruncateCoarseGridThreshold( void *mgr_vdata, HYPRE_Real threshold) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> truncate_coarse_grid_threshold) = threshold; return hypre_error_flag; } /* Set print level for F-relaxation solver */ HYPRE_Int hypre_MGRSetFrelaxPrintLevel( void *mgr_vdata, HYPRE_Int print_level ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> frelax_print_level) = print_level; return hypre_error_flag; } /* Set print level for coarse grid solver */ HYPRE_Int hypre_MGRSetCoarseGridPrintLevel( void *mgr_vdata, HYPRE_Int 
print_level ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> cg_print_level) = print_level; return hypre_error_flag; } /* Set print level for mgr solver */ HYPRE_Int hypre_MGRSetPrintLevel( void *mgr_vdata, HYPRE_Int print_level ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> print_level) = print_level; return hypre_error_flag; } /* Set logging level for mgr solver */ HYPRE_Int hypre_MGRSetLogging( void *mgr_vdata, HYPRE_Int logging ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> logging) = logging; return hypre_error_flag; } /* Set max number of iterations for mgr solver */ HYPRE_Int hypre_MGRSetMaxIter( void *mgr_vdata, HYPRE_Int max_iter ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> max_iter) = max_iter; return hypre_error_flag; } /* Set convergence tolerance for mgr solver */ HYPRE_Int hypre_MGRSetTol( void *mgr_vdata, HYPRE_Real tol ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> tol) = tol; return hypre_error_flag; } /* Set max number of iterations for mgr global smoother */ HYPRE_Int hypre_MGRSetMaxGlobalsmoothIters( void *mgr_vdata, HYPRE_Int max_iter ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> global_smooth_iters) = max_iter; return hypre_error_flag; } /* Set global smoothing type for mgr solver */ HYPRE_Int hypre_MGRSetGlobalsmoothType( void *mgr_vdata, HYPRE_Int iter_type ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> global_smooth_type) = iter_type; return hypre_error_flag; } /* Set the maximum number of non-zero entries for restriction and interpolation operator if classical AMG interpolation is used */ HYPRE_Int hypre_MGRSetPMaxElmts( void *mgr_vdata, HYPRE_Int P_max_elmts) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> P_max_elmts) = P_max_elmts; return hypre_error_flag; } /* Get number of iterations for MGR solver */ 
HYPRE_Int
hypre_MGRGetNumIterations( void *mgr_vdata, HYPRE_Int *num_iterations )
{
   /* Return the iteration count of the last MGR solve in *num_iterations. */
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   *num_iterations = mgr_data->num_iterations;

   return hypre_error_flag;
}

/* Get residual norms for MGR solver */
HYPRE_Int
hypre_MGRGetFinalRelativeResidualNorm( void *mgr_vdata, HYPRE_Real *res_norm )
{
   /* Return the final relative residual norm of the last solve in *res_norm. */
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   *res_norm = mgr_data->final_rel_residual_norm;

   return hypre_error_flag;
}

/* Get the recorded coarse-grid convergence factor. */
HYPRE_Int
hypre_MGRGetCoarseGridConvergenceFactor( void *mgr_vdata, HYPRE_Real *conv_factor )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   *conv_factor = (mgr_data -> cg_convergence_factor);

   return hypre_error_flag;
}

/* Build A_FF matrix from A given a CF_marker array */
HYPRE_Int
hypre_MGRGetSubBlock( hypre_ParCSRMatrix *A,
                      HYPRE_Int *row_cf_marker,
                      HYPRE_Int *col_cf_marker,
                      HYPRE_Int debug_flag,
                      hypre_ParCSRMatrix **A_block_ptr )
{
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_MemoryLocation    memory_location = hypre_ParCSRMatrixMemoryLocation(A);
   hypre_CSRMatrix        *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real             *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int              *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int              *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix        *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real             *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int              *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int              *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int               num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   //HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
   hypre_IntArray         *coarse_dof_func_ptr = NULL;
   /* global first/next coarse-point counts for rows and columns */
   HYPRE_BigInt            num_row_cpts_global[2];
   HYPRE_BigInt
num_col_cpts_global[2]; hypre_ParCSRMatrix *Ablock; HYPRE_BigInt *col_map_offd_Ablock; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *Ablock_diag; hypre_CSRMatrix *Ablock_offd; HYPRE_Real *Ablock_diag_data; HYPRE_Int *Ablock_diag_i; HYPRE_Int *Ablock_diag_j; HYPRE_Real *Ablock_offd_data; HYPRE_Int *Ablock_offd_i; HYPRE_Int *Ablock_offd_j; HYPRE_Int Ablock_diag_size, Ablock_offd_size; HYPRE_Int *Ablock_marker; HYPRE_Int ii_counter; HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; HYPRE_Int start_indexing = 0; /* start indexing for Aff_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; HYPRE_Int *coarse_counter; HYPRE_Int *col_coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_row_cpts; HYPRE_BigInt total_global_col_cpts; HYPRE_Int num_cols_Ablock_offd; // HYPRE_BigInt my_first_row_cpt, my_first_col_cpt; HYPRE_Int i, i1; HYPRE_Int j, jl, jj; HYPRE_Int start; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); hypre_IntArray *wrap_cf; // HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); //num_threads = hypre_NumThreads(); // Temporary fix, disable threading // TODO: enable threading num_threads = 1; /* get the number of coarse rows */ wrap_cf = hypre_IntArrayCreate(local_numrows); hypre_IntArrayMemoryLocation(wrap_cf) = HYPRE_MEMORY_HOST; hypre_IntArrayData(wrap_cf) = row_cf_marker; hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, wrap_cf, &coarse_dof_func_ptr, num_row_cpts_global); hypre_IntArrayDestroy(coarse_dof_func_ptr); coarse_dof_func_ptr = NULL; //hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_row_cpts_global[0], num_row_cpts_global[1]); // my_first_row_cpt = 
num_row_cpts_global[0]; if (my_id == (num_procs - 1)) { total_global_row_cpts = num_row_cpts_global[1]; } hypre_MPI_Bcast(&total_global_row_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); /* get the number of coarse rows */ hypre_IntArrayData(wrap_cf) = col_cf_marker; hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, wrap_cf, &coarse_dof_func_ptr, num_col_cpts_global); hypre_IntArrayDestroy(coarse_dof_func_ptr); coarse_dof_func_ptr = NULL; //hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_col_cpts_global[0], num_col_cpts_global[1]); // my_first_col_cpt = num_col_cpts_global[0]; if (my_id == (num_procs - 1)) { total_global_col_cpts = num_col_cpts_global[1]; } hypre_MPI_Bcast(&total_global_col_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } // if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); } if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = col_cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); /*----------------------------------------------------------------------- * First Pass: Determine size of Ablock and fill in fine_to_coarse mapping. 
*-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); col_coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; } jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a F-point, we loop through the columns and select * the F-columns. Also set up mapping vector. 
*--------------------------------------------------------------------*/ if (col_cf_marker[i] > 0) { fine_to_coarse[i] = col_coarse_counter[j]; col_coarse_counter[j]++; } if (row_cf_marker[i] > 0) { //fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; if (col_cf_marker[i1] > 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] > 0) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i = 0; i < num_threads - 1; i++) { jj_count[i + 1] += jj_count[i]; jj_count_offd[i + 1] += jj_count_offd[i]; coarse_counter[i + 1] += coarse_counter[i]; col_coarse_counter[i + 1] += col_coarse_counter[i]; } i = num_threads - 1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; ii_counter = coarse_counter[i]; Ablock_diag_size = jj_counter; Ablock_diag_i = hypre_CTAlloc(HYPRE_Int, ii_counter + 1, memory_location); Ablock_diag_j = hypre_CTAlloc(HYPRE_Int, Ablock_diag_size, memory_location); Ablock_diag_data = hypre_CTAlloc(HYPRE_Real, Ablock_diag_size, memory_location); Ablock_diag_i[ii_counter] = jj_counter; Ablock_offd_size = jj_counter_offd; Ablock_offd_i = hypre_CTAlloc(HYPRE_Int, ii_counter + 1, memory_location); Ablock_offd_j = hypre_CTAlloc(HYPRE_Int, Ablock_offd_size, memory_location); Ablock_offd_data = hypre_CTAlloc(HYPRE_Real, Ablock_offd_size, memory_location); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; //----------------------------------------------------------------------- // Send and receive fine_to_coarse info. 
//----------------------------------------------------------------------- // if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) { coarse_shift = col_coarse_counter[j - 1]; } size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { fine_to_coarse[i] += coarse_shift; } } // if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif // for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_col_cpt; #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,jl,i1,jj,ns,ne,size,rest,jj_counter,jj_counter_offd,ii_counter) HYPRE_SMP_SCHEDULE #endif #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (jl < rest) { ns = jl * size + jl; ne = (jl + 1) * size + jl + 1; } else { ns = jl * size + rest; ne = (jl + 1) * size + rest; } jj_counter = 0; if (jl > 0) { jj_counter = jj_count[jl - 1]; } jj_counter_offd = 0; if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; } ii_counter = 0; for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a F-point, we loop through the columns and select * the F-columns. Also set up mapping vector. 
*--------------------------------------------------------------------*/ if (row_cf_marker[i] > 0) { // Diagonal part of Ablock // Ablock_diag_i[ii_counter] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; if (col_cf_marker[i1] > 0) { Ablock_diag_j[jj_counter] = fine_to_coarse[i1]; Ablock_diag_data[jj_counter] = A_diag_data[jj]; jj_counter++; } } // Off-Diagonal part of Ablock // Ablock_offd_i[ii_counter] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] > 0) { Ablock_offd_j[jj_counter_offd] = i1; Ablock_offd_data[jj_counter_offd] = A_offd_data[jj]; jj_counter_offd++; } } } ii_counter++; } } Ablock_offd_i[ii_counter] = jj_counter_offd; Ablock_diag_i[ii_counter] = jj_counter; } Ablock = hypre_ParCSRMatrixCreate(comm, total_global_row_cpts, total_global_col_cpts, num_row_cpts_global, num_col_cpts_global, 0, Ablock_diag_i[ii_counter], Ablock_offd_i[ii_counter]); Ablock_diag = hypre_ParCSRMatrixDiag(Ablock); hypre_CSRMatrixData(Ablock_diag) = Ablock_diag_data; hypre_CSRMatrixI(Ablock_diag) = Ablock_diag_i; hypre_CSRMatrixJ(Ablock_diag) = Ablock_diag_j; Ablock_offd = hypre_ParCSRMatrixOffd(Ablock); hypre_CSRMatrixData(Ablock_offd) = Ablock_offd_data; hypre_CSRMatrixI(Ablock_offd) = Ablock_offd_i; hypre_CSRMatrixJ(Ablock_offd) = Ablock_offd_j; num_cols_Ablock_offd = 0; if (Ablock_offd_size) { Ablock_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < num_cols_A_offd; i++) { Ablock_marker[i] = 0; } num_cols_Ablock_offd = 0; for (i = 0; i < Ablock_offd_size; i++) { index = Ablock_offd_j[i]; if (!Ablock_marker[index]) { num_cols_Ablock_offd++; Ablock_marker[index] = 1; } } col_map_offd_Ablock = hypre_CTAlloc(HYPRE_BigInt, num_cols_Ablock_offd, memory_location); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_Ablock_offd, 
HYPRE_MEMORY_HOST);
   /* Compress marked offd columns: tmp_map_offd[i] is the i-th original offd
    * column index that carried a nonzero marker (ascending scan). */
   index = 0;
   for (i = 0; i < num_cols_Ablock_offd; i++)
   {
      while (Ablock_marker[index] == 0) { index++; }
      tmp_map_offd[i] = index++;
   }
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   /* Renumber Ablock's offd column indices into the compressed local space. */
   for (i = 0; i < Ablock_offd_size; i++)
      Ablock_offd_j[i] = hypre_BinarySearch(tmp_map_offd, Ablock_offd_j[i], num_cols_Ablock_offd);
   hypre_TFree(Ablock_marker, HYPRE_MEMORY_HOST);
   }

   if (num_cols_Ablock_offd)
   {
      hypre_ParCSRMatrixColMapOffd(Ablock) = col_map_offd_Ablock;
      hypre_CSRMatrixNumCols(Ablock_offd) = num_cols_Ablock_offd;
   }

   hypre_GetCommPkgRTFromCommPkgA(Ablock, A, fine_to_coarse, tmp_map_offd);

   /* Create the assumed partition */
   if (hypre_ParCSRMatrixAssumedPartition(Ablock) == NULL)
   {
      hypre_ParCSRMatrixCreateAssumedPartition(Ablock);
   }

   /* Ownership of Ablock (and the arrays installed into it above) transfers
    * to the caller through A_block_ptr. */
   *A_block_ptr = Ablock;

   /* Release all temporary work arrays. */
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(col_coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   return (0);
}

/* Build A_FF matrix from A given a CF_marker array.
 *
 * A         - input ParCSR matrix
 * CF_marker - C/F splitting marker for the local rows of A
 * A_ff_ptr  - output: the F-row/F-column sub-block of A (caller frees)
 *
 * The marker is negated so that F-points (marker < 0 in the input) become
 * the positively-marked points that hypre_MGRGetSubBlock extracts. */
HYPRE_Int
hypre_MGRBuildAff( hypre_ParCSRMatrix *A,
                   HYPRE_Int *CF_marker,
                   HYPRE_Int debug_flag,
                   hypre_ParCSRMatrix **A_ff_ptr )
{
   HYPRE_Int i;
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   /* create a copy of the CF_marker array and switch C-points to F-points */
   HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_numrows, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < local_numrows; i++)
   {
      CF_marker_copy[i] = -CF_marker[i];
   }

   /* Same (negated) marker is used for both rows and columns -> F/F block. */
   hypre_MGRGetSubBlock(A, CF_marker_copy, CF_marker_copy, debug_flag, A_ff_ptr);

   /* Free copy of CF marker */
   hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST);
   return (0);
}
/*********************************************************************************
 * This routine assumes that the 'toVector' is larger than the 'fromVector' and
 * the CF_marker is of the same length as the toVector. There must be n 'point_type'
 * values in the CF_marker, where n is the length of the 'fromVector'.
 * It adds the values of the 'fromVector' to the 'toVector' where the marker is the
 * same as the 'point_type'
 *********************************************************************************/
HYPRE_Int
hypre_MGRAddVectorP ( hypre_IntArray *CF_marker,
                      HYPRE_Int point_type,
                      HYPRE_Real a,
                      hypre_ParVector *fromVector,
                      HYPRE_Real b,
                      hypre_ParVector **toVector )
{
   hypre_Vector *fromVectorLocal = hypre_ParVectorLocalVector(fromVector);
   HYPRE_Real *fromVectorData = hypre_VectorData(fromVectorLocal);
   hypre_Vector *toVectorLocal = hypre_ParVectorLocalVector(*toVector);
   HYPRE_Real *toVectorData = hypre_VectorData(toVectorLocal);
   HYPRE_Int *CF_marker_data = hypre_IntArrayData(CF_marker);

   //HYPRE_Int n = hypre_ParVectorActualLocalSize(*toVector);
   /* n is the marker length, which (per the contract above) equals the
    * toVector length. */
   HYPRE_Int n = hypre_IntArraySize(CF_marker);
   HYPRE_Int i, j;

   /* i walks toVector/CF_marker; j walks the shorter fromVector and only
    * advances on marked entries: to[i] = b*to[i] + a*from[j]. */
   j = 0;
   for (i = 0; i < n; i++)
   {
      if (CF_marker_data[i] == point_type)
      {
         toVectorData[i] = b * toVectorData[i] + a * fromVectorData[j];
         j++;
      }
   }
   return 0;
}

/*************************************************************************************
 * This routine assumes that the 'fromVector' is larger than the 'toVector' and
 * the CF_marker is of the same length as the fromVector. There must be n 'point_type'
 * values in the CF_marker, where n is the length of the 'toVector'.
 * It adds the values of the 'fromVector' where the marker is the
 * same as the 'point_type' to the 'toVector'
 *************************************************************************************/
HYPRE_Int
hypre_MGRAddVectorR ( hypre_IntArray *CF_marker,
                      HYPRE_Int point_type,
                      HYPRE_Real a,
                      hypre_ParVector *fromVector,
                      HYPRE_Real b,
                      hypre_ParVector **toVector )
{
   hypre_Vector *fromVectorLocal = hypre_ParVectorLocalVector(fromVector);
   HYPRE_Real *fromVectorData = hypre_VectorData(fromVectorLocal);
   hypre_Vector *toVectorLocal = hypre_ParVectorLocalVector(*toVector);
   HYPRE_Real *toVectorData = hypre_VectorData(toVectorLocal);
   HYPRE_Int *CF_marker_data = hypre_IntArrayData(CF_marker);

   //HYPRE_Int n = hypre_ParVectorActualLocalSize(*toVector);
   /* n is the marker length == fromVector length here (mirror image of
    * hypre_MGRAddVectorP). */
   HYPRE_Int n = hypre_IntArraySize(CF_marker);
   HYPRE_Int i, j;

   /* i walks fromVector/CF_marker; j walks the shorter toVector:
    * to[j] = b*to[j] + a*from[i] for each marked entry. */
   j = 0;
   for (i = 0; i < n; i++)
   {
      if (CF_marker_data[i] == point_type)
      {
         toVectorData[j] = b * toVectorData[j] + a * fromVectorData[i];
         j++;
      }
   }
   return 0;
}

/*
HYPRE_Int
hypre_MGRBuildAffRAP( MPI_Comm comm, HYPRE_Int local_num_variables, HYPRE_Int num_functions,
                      HYPRE_Int *dof_func, HYPRE_Int *CF_marker, HYPRE_Int **coarse_dof_func_ptr,
                      HYPRE_BigInt **coarse_pnts_global_ptr, hypre_ParCSRMatrix *A, HYPRE_Int debug_flag,
                      hypre_ParCSRMatrix **P_f_ptr, hypre_ParCSRMatrix **A_ff_ptr )
{
   HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_num_variables, HYPRE_MEMORY_HOST);
   HYPRE_Int i;
   for (i = 0; i < local_num_variables; i++)
   {
      CF_marker_copy[i] = -CF_marker[i];
   }

   hypre_BoomerAMGCoarseParms(comm, local_num_variables, 1, NULL, CF_marker_copy,
                              coarse_dof_func_ptr, coarse_pnts_global_ptr);
   hypre_MGRBuildP(A, CF_marker_copy, (*coarse_pnts_global_ptr), 0, debug_flag, P_f_ptr);
   hypre_BoomerAMGBuildCoarseOperator(*P_f_ptr, A, *P_f_ptr, A_ff_ptr);

   hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST);
   return 0;
}
*/

/* Get pointer to coarse grid matrix for MGR solver.
 * Returns a borrowed reference through RAP; the matrix stays owned by
 * mgr_data and is only valid after MGRSetup() has been called. */
HYPRE_Int
hypre_MGRGetCoarseGridMatrix( void *mgr_vdata, hypre_ParCSRMatrix **RAP )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   if (mgr_data -> RAP == NULL)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                        " Coarse grid matrix is NULL. Please make sure MGRSetup() is called \n");
      return hypre_error_flag;
   }
   *RAP = mgr_data->RAP;
   return hypre_error_flag;
}

/* Get pointer to coarse grid solution for MGR solver.
 * Borrowed reference to the coarsest-level solution vector; valid only after
 * MGRSetup() and MGRSolve(). */
HYPRE_Int
hypre_MGRGetCoarseGridSolution( void *mgr_vdata, hypre_ParVector **sol )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   if (mgr_data -> U_array == NULL)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                        " MGR solution array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n");
      return hypre_error_flag;
   }
   *sol = mgr_data->U_array[mgr_data->num_coarse_levels];
   return hypre_error_flag;
}

/* Get pointer to coarse grid RHS for MGR solver.
 * Borrowed reference to the coarsest-level right-hand side; valid only after
 * MGRSetup() and MGRSolve(). */
HYPRE_Int
hypre_MGRGetCoarseGridRHS( void *mgr_vdata, hypre_ParVector **rhs )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   if (mgr_data -> F_array == NULL)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                        " MGR RHS array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n");
      return hypre_error_flag;
   }
   *rhs = mgr_data->F_array[mgr_data->num_coarse_levels];
   return hypre_error_flag;
}

/* Print coarse grid linear system (for debugging).
 * Only records the flag; the actual printing happens elsewhere (presumably
 * during setup/solve -- not visible from this file chunk).
 * NOTE(review): unlike the getters above, mgr_vdata is not NULL-checked. */
HYPRE_Int
hypre_MGRPrintCoarseSystem( void *mgr_vdata, HYPRE_Int print_flag)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   mgr_data->print_coarse_system = print_flag;
   return hypre_error_flag;
}

/* Print solver params: dump MGR setup and solve parameters to stdout. */
HYPRE_Int
hypre_MGRWriteSolverParams(void *mgr_vdata)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i, j;
   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   hypre_printf("MGR Setup parameters: \n");
   hypre_printf("Block size: %d\n", (mgr_data -> block_size));
   hypre_printf("Max number of coarse levels: %d\n", (mgr_data -> max_num_coarse_levels));
   hypre_printf("Relax type: %d\n", (mgr_data -> relax_type));
   hypre_printf("Set non-Cpoints to F-points: %d\n", (mgr_data -> set_non_Cpoints_to_F));
   hypre_printf("Set Cpoints method: %d\n", (mgr_data -> set_c_points_method));
   /* Per-level settings. */
   for (i = 0; i < max_num_coarse_levels; i++)
   {
      hypre_printf("Lev = %d, Interpolation type: %d\n", i, (mgr_data -> interp_type)[i]);
      hypre_printf("Lev = %d, Restriction type: %d\n", i, (mgr_data -> restrict_type)[i]);
      hypre_printf("Lev = %d, F-relaxation method: %d\n", i, (mgr_data -> Frelax_method)[i]);
      hypre_printf("Lev = %d, Use non-Galerkin coarse grid: %d\n", i, (mgr_data -> use_non_galerkin_cg)[i]);
      HYPRE_Int lvl_num_coarse_points = (mgr_data -> block_num_coarse_indexes)[i];
      hypre_printf("Lev = %d, Number of Cpoints: %d\n", i, lvl_num_coarse_points);
      hypre_printf("Cpoints indices: ");
      for (j = 0; j < lvl_num_coarse_points; j++)
      {
         if ((mgr_data -> block_cf_marker)[i][j] == 1)
         {
            hypre_printf("%d ", j);
         }
      }
      hypre_printf("\n");
   }
   hypre_printf("Number of Reserved Cpoints: %d\n", (mgr_data -> reserved_coarse_size));
   hypre_printf("Keep reserved Cpoints to level: %d\n", (mgr_data -> lvl_to_keep_cpoints));

   hypre_printf("\n MGR Solver Parameters: \n");
   hypre_printf("Number of relax sweeps: %d\n", (mgr_data -> num_relax_sweeps));
   hypre_printf("Number of interpolation sweeps: %d\n", (mgr_data -> num_interp_sweeps));
   hypre_printf("Number of restriction sweeps: %d\n", (mgr_data -> num_restrict_sweeps));
   hypre_printf("Global smoother type: %d\n", (mgr_data ->global_smooth_type));
   hypre_printf("Number of global smoother sweeps: %d\n", (mgr_data ->global_smooth_iters));
   hypre_printf("Max number of iterations: %d\n", (mgr_data -> max_iter));
   hypre_printf("Stopping tolerance: %e\n", (mgr_data -> tol));
   hypre_printf("Use default coarse grid solver: %d\n", (mgr_data -> use_default_cgrid_solver));
   if ((mgr_data -> fsolver_mode) >= 0)
   {
      hypre_printf("Use AMG solver for full AMG F-relaxation: %d\n", (mgr_data -> fsolver_mode));
   }
   return hypre_error_flag;
}

#ifdef HYPRE_USING_DSUPERLU
/* Allocate an (empty) SuperLU_DIST data holder for the MGR direct coarse
 * solver; populated by hypre_MGRDirectSolverSetup(). Caller owns the result. */
void *
hypre_MGRDirectSolverCreate()
{
   hypre_DSLUData *dslu_data = hypre_CTAlloc(hypre_DSLUData, 1, HYPRE_MEMORY_HOST);

   return (void *) dslu_data;
}

/* Factor A with SuperLU_DIST (pdgssvx with nrhs = 0, i.e. factorization
 * only). f and u are unused here; they are part of the generic solver-setup
 * signature. */
HYPRE_Int
hypre_MGRDirectSolverSetup( void *solver,
                            hypre_ParCSRMatrix *A,
                            hypre_ParVector *f,
                            hypre_ParVector *u )
{
   /* Par Data Structure variables */
   HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_local;
   HYPRE_Int num_rows;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int pcols = 1, prows = 1;
   HYPRE_BigInt *big_rowptr = NULL;
   hypre_DSLUData *dslu_data = (hypre_DSLUData *) solver;

   HYPRE_Int info = 0;
   HYPRE_Int nrhs = 0;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* Merge diag and offd into one matrix (global ids) */
   A_local = hypre_MergeDiagAndOffd(A);

   num_rows = hypre_CSRMatrixNumRows(A_local);
   /* Now convert hypre matrix to a SuperMatrix */
#ifdef HYPRE_MIXEDINT
   /* Mixed-int builds: widen the row pointer to HYPRE_BigInt for SuperLU. */
   {
      HYPRE_Int *rowptr = NULL;
      HYPRE_Int i;
      rowptr = hypre_CSRMatrixI(A_local);
      big_rowptr = hypre_CTAlloc(HYPRE_BigInt, (num_rows + 1), HYPRE_MEMORY_HOST);
      for (i = 0; i < (num_rows + 1); i++)
      {
         big_rowptr[i] = (HYPRE_BigInt)rowptr[i];
      }
   }
#else
   big_rowptr = hypre_CSRMatrixI(A_local);
#endif

   dCreate_CompRowLoc_Matrix_dist(
      &(dslu_data->A_dslu), global_num_rows, global_num_rows,
      hypre_CSRMatrixNumNonzeros(A_local),
      num_rows,
      hypre_ParCSRMatrixFirstRowIndex(A),
      hypre_CSRMatrixData(A_local),
      hypre_CSRMatrixBigJ(A_local), big_rowptr,
      SLU_NR_loc, SLU_D, SLU_GE);

   /* DOK: SuperLU frees assigned data, so set them to null before
    * calling hypre_CSRMatrixdestroy on A_local to avoid memory errors.
   */
#ifndef HYPRE_MIXEDINT
   hypre_CSRMatrixI(A_local) = NULL;
#endif
   hypre_CSRMatrixData(A_local) = NULL;
   hypre_CSRMatrixBigJ(A_local) = NULL;
   hypre_CSRMatrixDestroy(A_local);

   /*Create process grid */
   /* Largest prows with prows*pcols == num_procs, starting from the biggest
    * prows <= num_procs and shrinking until the product is exact. */
   while (prows * pcols <= num_procs) { ++prows; }
   --prows;
   pcols = num_procs / prows;
   while (prows * pcols != num_procs)
   {
      prows -= 1;
      pcols = num_procs / prows;
   }
   //hypre_printf(" prows %d pcols %d\n", prows, pcols);

   superlu_gridinit(comm, prows, pcols, &(dslu_data->dslu_data_grid));

   set_default_options_dist(&(dslu_data->dslu_options));

   dslu_data->dslu_options.Fact = DOFACT;
   dslu_data->dslu_options.PrintStat = NO;
   /*dslu_data->dslu_options.IterRefine = SLU_DOUBLE;
   dslu_data->dslu_options.ColPerm = MMD_AT_PLUS_A;
   dslu_data->dslu_options.DiagPivotThresh = 1.0;
   dslu_data->dslu_options.ReplaceTinyPivot = NO; */

   dScalePermstructInit(global_num_rows, global_num_rows, &(dslu_data->dslu_ScalePermstruct));

   dLUstructInit(global_num_rows, &(dslu_data->dslu_data_LU));

   PStatInit(&(dslu_data->dslu_data_stat));

   dslu_data->global_num_rows = global_num_rows;

   dslu_data->berr = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST);
   dslu_data->berr[0] = 0.0;

   /* nrhs == 0 => factorize only; solves happen later via pdgssvx with
    * Fact = FACTORED. */
   pdgssvx(&(dslu_data->dslu_options), &(dslu_data->A_dslu),
           &(dslu_data->dslu_ScalePermstruct), NULL, num_rows, nrhs,
           &(dslu_data->dslu_data_grid), &(dslu_data->dslu_data_LU),
           &(dslu_data->dslu_solve), dslu_data->berr,
           &(dslu_data->dslu_data_stat), &info);

   dslu_data->dslu_options.Fact = FACTORED;
   return hypre_error_flag;
}

/* Triangular solve with the factorization computed in Setup. A is unused;
 * it is part of the generic solver-apply signature. */
HYPRE_Int
hypre_MGRDirectSolverSolve( void *solver,
                            hypre_ParCSRMatrix *A,
                            hypre_ParVector *f,
                            hypre_ParVector *u )
{
   hypre_SLUDistSolve(solver, f, u);

   return hypre_error_flag;
}

/* Release all SuperLU_DIST state owned by the solver handle. */
HYPRE_Int
hypre_MGRDirectSolverDestroy( void *solver )
{
   hypre_SLUDistDestroy(solver);

   return hypre_error_flag;
}
#endif
ops.h
/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

#pragma once
#ifndef OPS_H_
#define OPS_H_

#include <op_boilerplate.h>
#include <array/DataTypeUtils.h>
#include <helpers/shape.h>
#include <vector>
#include <Environment.h>
#include <loops/summarystatsreduce.h>

// Numeric limit/epsilon constants used by the op implementations below.
#define MIN 1e-12
#define MAX_FLOAT 1e37
#define MIN_FLOAT 1e-37
#define MAX_INT 2147483647
#define MIN_CUTFOFF -3.79297773665f
#define FLOAT_MIN_NORMAL 1.17549435e-38
#define EPS 1e-5
#define AFFINITY close
#define DOUBLE_PI_T T(2.0 * 3.14159265358979323846)
#define DOUBLE_PI_X X(2.0 * 3.14159265358979323846)

// Stub "execSpecial" members for op classes that have no special host-side
// implementation; each variant matches a different transform/reduce signature
// (X = input type, Z = output type).
#define no_op_exec_special_any static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_bool static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_same static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, Z *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, Z *extraParams, Z *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#define no_op_exec_special_accumulation_long static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, X *extraParams, Z *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#define no_op_exec_special_accumulation_same static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, X *extraParams, X *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}

#ifdef __CUDACC__
#include <helpers/sharedmem.h>

// CUDA builds: stub __device__ "execSpecialCuda" members, mirroring the host
// stubs above.
#define no_op_exec_special_any_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_bool_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_same_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, X *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, Z *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_same_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, X *extraParams, X *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, X *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_long_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, X *extraParams, Z *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, Z *extraParams, Z *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#else
// hacky fix for isnan/being being out of scope
//#ifdef IOS
//#define isinf(x) 0 // this isn't right. But std::isinf fails
//#define isnan(x) 0
//#else
//#define isnan std::isnan
//#define isinf std::isinf
//#endif

// Non-CUDA builds: the *_cuda members expand to nothing.
// NOTE(review): no_op_exec_special_accumulation_same_cuda is defined twice in
// this list (harmless duplicate).
#define no_op_exec_special_cuda
#define no_op_exec_special_accumulation_cuda
#define no_op_exec_special_accumulation_same_cuda
#define no_op_exec_special_accumulation_long_cuda
#define no_op_exec_special_any_cuda
#define no_op_exec_special_bool_cuda
#define no_op_exec_special_same_cuda
#define no_op_exec_special_accumulation_same_cuda
#endif

#define SELU_ALPHA 1.6732632423543772848170429916717
#define SELU_LAMBDA 1.0507009873554804934193349852946

#ifdef _OPENMP
// Custom OpenMP reductions over the supported numeric types.
#pragma omp declare reduction(maxT : float,double,float16,bfloat16 : \
                omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\
                initializer (omp_priv=-MAX_FLOAT)

#pragma omp declare reduction(minT : float,double,float16,bfloat16 : \
                omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\
                initializer (omp_priv=MAX_FLOAT)

#pragma omp declare reduction(sumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
                omp_out = omp_in + omp_out)\
                initializer (omp_priv=0)
#endif


namespace functions {
    namespace indexreduce {
        // (value, index) pair carried through index-reduction ops.
        template <typename T>
        struct IndexValue {
            T value;
            Nd4jLong index;

            _CUDA_HD IndexValue() = default;
            _CUDA_HD IndexValue(const T val, const Nd4jLong ind): index(ind), value(val) {}
        };
    }

    namespace summarystats {
        template <typename T>
        class SummaryStatsData;
    }
}

namespace simdOps {

    // Element-wise addition: Z(d1 + d2). Identity/startingValue is 0.
    template <typename X, typename Y, typename Z>
    class Add {
    public:
        op_def static Z op(X d1, Y d2) {
            return static_cast<Z>(d1 + d2);
        }

        op_def static Z op(X d1, Y d2, Z *params) {
            return static_cast<Z>(d1 + d2);
        }

        op_def static Z op(X d1) {
            return static_cast<Z>(d1);
        }

        // op for MetaOps
        op_def static Z op(X d1, Y *params) {
            return static_cast<Z>(d1 + params[0]);
        }

        op_def static X startingValue() {
            return static_cast<X>(0.f);
        }
    };

    // Same-type addition with a params pointer (result type == X).
    template <typename X, typename Y>
    class NewAdd {
    public:
        op_def static X op(X d1, Y d2, X *params) {
            return d1 + d2;
        }
    };

    template <typename X,
typename Y, typename Z> class Subtract { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d1 - d2); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d1 - d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1 - params[0]); } }; template <typename X, typename Y, typename Z> class SquaredSubtract { public: op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_pow<Z, float, Z>(static_cast<Z>(d1 - d2), 2.f); } op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_pow<Z, float, Z>(static_cast<Z>(d1 - d2), 2.f); } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { return nd4j::math::nd4j_pow<Z, float, Z>(static_cast<Z>(d1 - params[0]), 2.f); } }; template <typename X, typename Y, typename Z> class ReverseSubtract { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2 - d1); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d2 - d1); } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<Z>(params[0] - d1); } }; template <typename X, typename Y, typename Z> class LogPoissonLossFull { public: op_def static Z op(X z, Y c) { auto zz = static_cast<Z>(z); auto zc = static_cast<Z>(c); return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz))); } op_def static Z op(X z, Y c, Z *params) { auto zz = static_cast<Z>(z); auto zc = static_cast<Z>(c); return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz))); } op_def static Z op(X z) { auto zz = static_cast<Z>(z); return (zz * nd4j::math::nd4j_log<Y, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, 
Z>(static_cast<Z>(DOUBLE_PI_X) * zz)); } // op for MetaOps op_def static X op(X z, Y *params) { return (nd4j::math::nd4j_exp<X, X>(params[0]) - z * params[0] + (z * nd4j::math::nd4j_log<X, Z>(z) - z + static_cast<X>(0.5f) * nd4j::math::nd4j_log<X, Z>(DOUBLE_PI_X * z))); } }; template <typename X, typename Y, typename Z> class LogPoissonLoss { public: op_def static Z op(X z, Y c) { auto zz = static_cast<Z>(z); auto zc = static_cast<Z>(c); return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc); } op_def static Z op(X z, Y c, Z *params) { auto zz = static_cast<Z>(z); auto zc = static_cast<Z>(c); return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc); } op_def static Z op(X z) { return static_cast<Z>(z); } // op for MetaOps op_def static Z op(X z, Y *params) { return (nd4j::math::nd4j_exp<Y, Z>(params[0]) - static_cast<Z>(z) * static_cast<Z>(params[0])); } }; template <typename X, typename Y, typename Z> class Multiply { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d1 * d2); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d1 * d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1 * params[0]); } op_def static X startingValue() { return static_cast<X>(1.f); } }; template <typename X, typename Y, typename Z> class Divide { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d1 / d2); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d1 / d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1 / params[0]); } op_def static X startingValue() { return static_cast<X>(1); } }; template <typename X, typename Y, typename Z> class SafeDivide { public: op_def static Z op(X d1, Y d2) { if(d2 == static_cast<Y>(0)) return static_cast<Z>(0); return static_cast<Z>(d1 / d2); } op_def static Z op(X d1, Y d2, Z *params) { if(d2 == static_cast<Y>(0)) return 
static_cast<Z>(0); return static_cast<Z>(d1 / d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } // op for MetaOps op_def static Z op(X d1, Y *params) { if(params[0] == static_cast<Y>(0)) return static_cast<Z>(0); return static_cast<Z>(d1 / params[0]); } }; template <typename X, typename Y, typename Z> class FloorDiv { public: op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / d2)); } op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / d2)); } op_def static Z op(X d1) { return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1)); } // op for MetaOps op_def static Z op(X d1, Y *params) { return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / params[0])); } }; template <typename X, typename Y, typename Z> class TruncateDiv { public: op_def static Z op(X d1, Y d2) { auto i1 = static_cast<int>(d1); auto i2 = static_cast<int>(d2); return static_cast<Z>(i1 / i2); } op_def static Z op(X d1, Y d2, Z *params) { auto i1 = static_cast<int>(d1); auto i2 = static_cast<int>(d2); return static_cast<Z>(i1 / i2); } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { auto i1 = static_cast<int>(d1); auto i2 = static_cast<int>(params[0]); return static_cast<Z>(i1 / i2); } }; template <typename X, typename Y, typename Z> class TruncateMod { public: op_def static Z op(X d1, Y d2) { auto i1 = static_cast<int>(d1); auto i2 = static_cast<int>(d2); return static_cast<Z>(i1 % i2); } op_def static Z op(X d1, Y d2, Z *params) { auto i1 = static_cast<int>(d1); auto i2 = static_cast<int>(d2); return static_cast<Z>(i1 % i2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } // op for MetaOps op_def static Z op(X d1, Y *params) { auto i1 = static_cast<int>(d1); auto i2 = static_cast<int>(params[0]); return static_cast<Z>(i1 % i2); } }; template<typename X, typename Y, typename Z> class Remainder { public: op_def static Z op(X d1, Y d2) { return 
nd4j::math::nd4j_remainder<X, Y, Z>(d1, d2); } op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_remainder<X, Y, Z>(d1, d2); } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { return nd4j::math::nd4j_remainder<X, Y, Z>(d1, params[0]); } }; template <typename X, typename Y, typename Z> class FMod { public: op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2); } op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2); } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { return nd4j::math::nd4j_fmod<X, Y, Z>(d1, params[0]); } }; template <typename X, typename Y, typename Z> class FloorMod { public: op_def static Z op(X d1, Y d2) { auto m = nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2); return (d1 < static_cast<X>(0)) == (d2 < static_cast<Y>(0)) ? m : nd4j::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2); } op_def static Z op(X d1, Y d2, Z *params) { auto m = nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2); return (d1 < static_cast<X>(0.0f)) == (d2 < static_cast<Y>(0)) ? 
m : nd4j::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2); } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { return op(d1, params[0]); } }; template <typename X, typename Y, typename Z> class ReverseDivide { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2 / d1); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d2 / d1); } op_def static Z op(X d1) { return static_cast<Z>(d1); } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<Z>(params[0] / d1); } }; template <typename X, typename Y, typename Z> class CopyPws { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1); } }; template <typename X> class Copy { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return d1; } }; template <typename X, typename Y, typename Z> class Copy2 { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1); } }; template <typename X, typename Y, typename Z> class Axpy { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2 + d1); } op_def static Z op(X d1, Y d2, Z *params) { auto alpha = params[0]; return alpha * static_cast<Z>(d1) + static_cast<Z>(d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } }; template <typename X, typename Z> class Assign { public: no_op_exec_special_any no_op_exec_special_any_cuda op_def static Z op(X d1, X *params) { return static_cast<Z>(d1); } }; template <typename X, typename Z> class And { public: no_op_exec_special_bool no_op_exec_special_bool_cuda op_def static Z 
op(X d1, X d2) {
    return d2 + d1;
}

op_def static Z op(X d1, X d2, X *params) {
    if (params != nullptr) {
        auto comp = params[0];
        return d1 != comp && d2 != comp ? static_cast<Z>(1) : static_cast<Z>(0);
    } else {
        auto b1 = static_cast<bool>(d1);
        auto b2 = static_cast<bool>(d2);
        return (b1 && b2) ? static_cast<Z>(1) : static_cast<Z>(0);
    }
}

op_def static Z op(X d1) {
    return d1;
}

// op for MetaOps
op_def static Z op(X d1, X *params) {
    return static_cast<Z>(119);
}
};

// Boolean OR; when params is given, params[0] is the "comparable" sentinel value.
template <typename X, typename Z>
class Or {
public:
    no_op_exec_special_bool
    no_op_exec_special_bool_cuda

    op_def static Z op(X d1, X d2) {
        return d2 + d1;
    }

    op_def static Z op(X d1, X d2, X *params) {
        if (params != nullptr) {
            auto comp = params[0];
            return d1 != comp || d2 != comp ? static_cast<Z>(1) : static_cast<Z>(0);
        } else {
            auto b1 = static_cast<bool>(d1);
            auto b2 = static_cast<bool>(d2);
            return b1 || b2 ? static_cast<Z>(1) : static_cast<Z>(0);
        }
    }

    op_def static Z op(X d1) {
        return d1;
    }

    // op for MetaOps
    op_def static Z op(X d1, X *params) {
        return static_cast<Z>(119);
    }
};

// Boolean XOR; when params is given, params[0] is the "comparable" sentinel value.
template <typename X, typename Z>
class Xor {
public:
    no_op_exec_special_bool
    no_op_exec_special_bool_cuda

    op_def static Z op(X d1, X d2) {
        return d2 + d1;
    }

    op_def static Z op(X d1, X d2, X *params) {
        if (params != nullptr) {
            auto comp = params[0];
            return ((d1 == comp && d2 != comp) || (d1 != comp && d2 == comp)) ? static_cast<Z>(1) : static_cast<Z>(0);
        } else {
            auto b1 = static_cast<bool>(d1);
            auto b2 = static_cast<bool>(d2);
            return (!b1 && b2 )||(b1 && !b2) ? static_cast<Z>(1) : static_cast<Z>(0);
        }
    }

    op_def static Z op(X d1) {
        return d1;
    }
};

// Boolean NOT; the pairwise form behaves as "not equal".
template <typename X, typename Z>
class Not {
public:
    no_op_exec_special_bool
    no_op_exec_special_bool_cuda

    op_def static Z op(X d1, X d2) {
        return static_cast<Z>(0);
    }

    op_def static Z op(X d1, X d2, X *params) {
        return d1 != d2 ?
static_cast<Z>(1) : static_cast<Z>(0);
}

// this transform op should run only on boolean input
op_def static Z op(X d1, X *params) {
    auto b1 = static_cast<bool>(d1);
    return !b1;
}
};

template <typename X, typename Y, typename Z>
class LogicalNot {
public:
    op_def static Z op(X d1, Y d2) {
        return !((int) d1 && (int) d2);
    }

    op_def static Z op(X d1, Y d2, Z *params) {
        return static_cast<X>(!(static_cast<int>(d1) && static_cast<int>(d2)));
    }

    op_def static Z op(X d1) {
        return d1;
    }

    // op for MetaOps
    op_def static Z op(X d1, Y *params) {
        return static_cast<X>(119);
    }
};

template <typename X, typename Y, typename Z>
class LogicalXor {
public:
    op_def static Z op(X d1, Y d2) {
        auto i1 = static_cast<int>(d1);
        auto i2 = static_cast<int>(d2);

        return (i1 | i2) &~ (i1 & i2);
    }

    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }

    op_def static Z op(X d1) {
        return d1;
    }

    // op for MetaOps
    op_def static Z op(X d1, Y *params) {
        return static_cast<Z>(119);
    }
};

template <typename X, typename Y, typename Z>
class LogicalAnd {
public:
    op_def static Z op(X d1, Y d2) {
        return static_cast<int>(d1) & static_cast<int>(d2);
    }

    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }

    op_def static Z op(Y d1) {
        return d1;
    }

    // op for MetaOps
    op_def static Z op(X d1, Y *params) {
        return static_cast<Z>(119);
    }
};

template <typename X, typename Y, typename Z>
class LogicalOr {
public:
    op_def static Z op(X d1, Y d2) {
        return static_cast<int>(d1) | static_cast<int>(d2);
    }

    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }

    op_def static Z op(X d1) {
        return d1;
    }

    // op for MetaOps
    op_def static Z op(X d1, Y *params) {
        return static_cast<X>(119);
    }
};

// Integer modulo: both operands are truncated to int.
template <typename X, typename Y, typename Z>
class Mod {
public:
    /*

     // just a optional note, feel free to remove later

    op_def static half op(half d1, half d2, half *params) {
        return __float2half(simdOps::Mod<float>::op(__half2float(d1), __half2float(d2), nullptr));
    }
     */

    op_def static Z op(X d1, Y d2) {
        return
static_cast<int>(d1) % static_cast<int>(d2);
}

op_def static Z op(X d1, Y d2, Z *params) {
    return op(d1, d2);
}

// op for MetaOp
op_def static Z op(X d1, Y *params) {
    return op(d1, params[0]);
}
};

// Integer modulo with reversed operands: d2 % d1.
template <typename X, typename Y, typename Z>
class ReverseMod {
public:
    op_def static Z op(X d1, Y d2) {
        return static_cast<int>(d2) % static_cast<int>(d1);
    }

    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }

    // op for MetaOp
    op_def static Z op(X d1, Y *params) {
        return op(d1, params[0]);
    }
};

/**
 * Whether 2 elements in an array
 * are epsilion equal
 */
template <typename X, typename Z>
class Epsilon {
public:
    op_def static Z op(X d1, X d2) {
        X diff = d1 - d2;
        X absDiff = nd4j::math::nd4j_abs<X>(diff);
        // MIN is the library-wide epsilon tolerance constant
        if (absDiff <= static_cast<X>(MIN))
            return static_cast<Z>(1);
        return static_cast<Z>(0);
    }

    op_def static Z op(X d1, X d2, X *params) {
        return op(d1, d2);
    }

    op_def static Z op(X d1, X *params) {
        return d1;
    }
};

template <typename X, typename Z>
class EqualTo {
public:
    op_def static Z op(X d1, X d2) {
        return d1 == d2;
    }

    op_def static Z op(X d1, X d2, X *params) {
        return op(d1, d2);
    }

    op_def static Z op(X d1, X *params) {
        return d1;
    }
};

template <typename X, typename Z>
class NotEqualTo {
public:
    op_def static Z op(X d1, X d2) {
        return d1 != d2;
    }

    op_def static Z op(X d1, X d2, X *params) {
        return op(d1, d2);
    }

    op_def static Z op(X d1, X *params) {
        return d1;
    }
};

template <typename X, typename Z>
class GreaterThanOrEqual {
public:
    op_def static Z op(X d1, X d2) {
        return d1 >= d2;
    }

    op_def static Z op(X d1, X d2, X *params) {
        return op(d1, d2);
    }

    // FIXME: this signature clashes with MetaOp stuff
    op_def static Z op(X d1, X *params) {
        return d1;
    }
};

template <typename X, typename Z>
class GreaterThan {
public:
    op_def static Z op(X d1, X d2) {
        return d1 > d2;
    }

    op_def static Z op(X d1, X d2, X *params) {
        return op(d1, d2);
    }

    // FIXME: this signature clashes with MetaOp stuff
    op_def static Z op(X d1, X *params) {
        return d1;
    }
};

template <typename X, typename Z>
class LessThan {
public:
    op_def static Z op(X d1, X d2) {
        return d1 < d2;
    }

    op_def static Z op(X d1, X d2, X *params) {
        return op(d1, d2);
    }

    op_def static Z op(X d1, X *params) {
        return d1;
    }
};

template <typename X, typename Z>
class LessThanOrEqual {
public:
    op_def static Z op(X d1, X d2) {
        return d1 <= d2;
    }

    op_def static Z op(X d1, X d2, X *params) {
        return op(d1, d2);
    }

    op_def static Z op(X d1, X *params) {
        return d1;
    }
};

template <typename X>
class Abs {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_abs<X>(d1);
    }
};

template <typename X>
class Ceiling {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_ceil<X,X>(d1);
    }
};

template <typename X>
class Cosine {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_cos<X,X>(d1);
    }
};

template <typename X>
class Exp {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_exp<X, X>(d1);
    }
};

// Derivative of hard tanh: 1 inside [-1, 1], 0 outside.
template <typename X>
class HardTanhDerivative {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return ((d1 >= static_cast<X>(-1.f) && d1 <= static_cast<X>(1.f)) ?
static_cast<X>(1.f) : static_cast<X>(0.f));
}
};

// Clamp to [-1, 1].
template <typename X>
class HardTanh {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        if (d1 < static_cast<X>(-1))
            return static_cast<X>(-1);
        else if (d1 > static_cast<X>(1))
            return static_cast<X>(1);
        else
            return d1;
    }
};

template <typename X>
class Floor {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_floor<X,X>(d1);
    }
};

template <typename X>
class Log {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_log<X, X>(d1);
    }
};

// log(1 + x)
template <typename X>
class Log1p {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_log<X, X>(1 + d1);
    }
};

// Logarithm of d1 in base d2 (change-of-base formula).
template <typename X, typename Y, typename Z>
class LogX {
public:
    op_def static Z op(X d1, Y d2, Z *params) {
        return nd4j::math::nd4j_log<X, Z>(d1) / nd4j::math::nd4j_log<Y, Z>(d2) ;
    }
};

// Replace non-positive values with the smallest positive float16 value.
template <typename X>
class StabilizeFP16 {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        if (d1 <= static_cast<X>(0))
            return static_cast<X>(nd4j::DataTypeUtils::min<float16>());
        else return d1;
    }
};

// Replace non-positive values with the smallest positive value of X.
template <typename X>
class StabilizeX {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        if (d1 <= static_cast<X>(0))
            return nd4j::DataTypeUtils::min<X>();
        else return d1;
    }
};

// Sigmoid-style derivative form: d1 * (1 - d1).
template <typename X>
class SpecialDerivative {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return d1 * (static_cast<X>(1.f) - d1);
    }
};

template <typename X>
class Neg {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return -d1;
    }
};

template <typename X>
class Erf {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X
*params) {
    return nd4j::math::nd4j_erf<X,X>(d1);
}
};

template <typename X>
class Erfc {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_erfc<X,X>(d1);
    }
};

// 1 / x
template <typename X>
class Reciprocal {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda
//	op_def static T op(T d1) {
//		return (T(1.0f) / d1);
//	}
    // op for MetaOps
    op_def static X op(X d1, X *params) {
        return (static_cast<X>(1) / d1);
    }
};

template <typename X, typename Z>
class Sqr {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static Z op(X d1, Z *params) {
        return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2));
    }

    op_def static Z op(X d1) {
        return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2));
    }
};

// Relative error between the two operands.
template <typename X, typename Y, typename Z>
class RelativeError {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static Z op(X d1, Y d2) {
        return nd4j::math::nd4j_re<X>(d1, d2);
    }

    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }

    op_def static Z op(X d1) {
        return static_cast<Z>(0);
    }
};

// 1 when the relative error exceeds the threshold in params[0], else 0.
template <typename X, typename Y, typename Z>
class BinaryRelativeError {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static Z op(X d1, Y d2, Z *params) {
        X threshold = params[0];
        return nd4j::math::nd4j_re<X>(d1, d2) > threshold ? static_cast<Z>(1) : static_cast<Z>(0);
    }

    op_def static Z op(X d1) {
        return static_cast<Z>(0);
    }
};

// 1 only when both the relative and the absolute thresholds are exceeded.
template <typename X, typename Y, typename Z>
class BinaryMinimumAbsoluteRelativeError {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static Z op(X d1, X *params) {
        X d2 = params[0];
        X thresholdRelative = params[1];
        X thresholdAbsolute = params[2];
        return nd4j::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ?
static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0);
}

op_def static Z op(X d1, Y d2, Z *params) {
    X thresholdRelative = params[0];
    X thresholdAbsolute = params[1];
    return nd4j::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0);
}

op_def static Z op(X d1) {
    return static_cast<Z>(0);
}
};

template <typename X, typename Y, typename Z>
class Pow {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static Z op(X d1, Z *params) {
        return nd4j::math::nd4j_pow<X, X, Z>(d1, params[0]);
    }

    op_def static Z op(X d1, Y d2) {
        return nd4j::math::nd4j_pow<X, Y, Z>(d1, d2);
    }

    op_def static Z op(X d1, Y d2, Z *params) {
        return nd4j::math::nd4j_pow<X, Y, Z>(d1, d2);
    }

    op_def static Z op(X d1) {
        return d1;
    }
};

// d/dx of pow: d2 * d1^(d2 - 1).
template <typename X, typename Y, typename Z>
class PowDerivative {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static Z op(X d1, Z *params) {
        return params[0] * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(params[0]) - static_cast<Z>(1.f));
    }

    op_def static Z op(X d1, Y d2) {
        return static_cast<Z>(d2) * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(d2) - static_cast<Z>(1.f));
    }

    op_def static Z op(X d1, Y d2, Z *params) {
        return static_cast<Z>(d2) * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(d2) - static_cast<Z>(1.f));
    }

    op_def static Z op(X d1) {
        return d1;
    }
};

template <typename X>
class Round {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_round<X,X>(d1);
    }
};

// 1 where NaN, 0 elsewhere; the reduction form counts NaNs.
template <typename X, typename Z>
class IsNan {
public:
    no_op_exec_special_bool
    no_op_exec_special_bool_cuda

    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static Z op(X d1, X *params) {
        return nd4j::math::nd4j_isnan(d1) ?
static_cast<X>(1) : static_cast<X>(0);
}

op_def static X startingValue(const X *input) {
    return static_cast<X>(0);
}

op_def static Z merge(X old, X opOutput, X *extraParams) {
    return opOutput + old;
}

op_def static Z update(X old, X opOutput, X *extraParams) {
    return opOutput + old;
}

op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
    return reduction;
}
};

// exp(x) - 1
template <typename X>
class Expm1 {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_exp<X, X>(d1) - static_cast<X>(1);
    }
};

// 1 where d1 > 0; the reduction form counts positive values.
template <typename X, typename Z>
class IsPositive {
public:
    no_op_exec_special_bool
    no_op_exec_special_bool_cuda

    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static Z op(X d1, X *params) {
        return d1 > (X)0.f;
    }

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(X old, X opOutput, X *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(X old, X opOutput, X *extraParams) {
        return opOutput + old;
    }

    op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return reduction;
    }
};

// 1 where infinite; the reduction form counts infinities.
template <typename X, typename Z>
class IsInf {
public:
    no_op_exec_special_bool
    no_op_exec_special_bool_cuda

    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static Z op(X d1, X *params) {
        return nd4j::math::nd4j_isinf<X>(d1) ?
static_cast<Z>(1) : static_cast<Z>(0);
}

op_def static X startingValue(const X *input) {
    return static_cast<X>(0);
}

op_def static Z merge(X old, X opOutput, X *extraParams) {
    return opOutput + old;
}

op_def static Z update(X old, X opOutput, X *extraParams) {
    return opOutput + old;
}

op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
    return reduction;
}
};

// 1 where NOT finite (inf or NaN); the reduction form counts them.
template <typename X, typename Z>
class IsInfOrNan{
public:
    no_op_exec_special_bool
    no_op_exec_special_bool_cuda

    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static Z op(X d1, X *params) {
        return nd4j::math::nd4j_isfin<X>(d1) ? static_cast<Z>(0) : static_cast<Z>(1);
    }

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(X old, X opOutput, X *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(X old, X opOutput, X *extraParams) {
        return opOutput + old;
    }

    op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return reduction;
    }
};

// 1 where finite; the reduction form counts finite values.
template <typename X, typename Z>
class IsFinite {
public:
    no_op_exec_special_bool
    no_op_exec_special_bool_cuda

    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static Z op(X d1, X *params) {
        return nd4j::math::nd4j_isfin<X>(d1) ?
static_cast<Z>(1) : static_cast<Z>(0); } op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(X old, X opOutput, X *extraParams) { return opOutput + old; } op_def static Z update(X old, X opOutput, X *extraParams) { return opOutput + old; } op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) { return reduction; } }; template <typename X> class ClipByValue { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { if (d1 > params[1]) return params[1]; if (d1 < params[0]) return params[0]; return d1; } }; template <typename X, typename Y, typename Z> class LstmClip { public: no_op_exec_special no_op_exec_special_cuda op_def static Z op(X d1, Y d2, Z *params) { X _v = (X) d2; if (d1 > _v) return _v; else if (d1 < _v) return _v; else return d1; } }; template <typename X> class Swish { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return d1 * nd4j::math::nd4j_sigmoid<X,X>(d1); } }; template <typename X> class SwishDerivative { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { X ex = nd4j::math::nd4j_pow<X, X, X>(static_cast<X>(M_E), d1); return (ex * (d1 + ex + static_cast<X>(1.f))) / nd4j::math::nd4j_pow<X, X, X>((ex + static_cast<X>(1.f)) , static_cast<X>(2.f)); } }; template <typename X> class LogSigmoid { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return nd4j::math::nd4j_log<X, X>(nd4j::math::nd4j_sigmoid<X, X>(d1)); } }; template <typename X> class LogSigmoidDerivative { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { X ex = nd4j::math::nd4j_pow<X, X, X>(M_E, d1); return static_cast<X>(1.f) / (ex + static_cast<X>(1.f)); } }; template <typename X> class Sigmoid { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) 
{
    return nd4j::math::nd4j_sigmoid<X, X>(d1);
}
};

template <typename X>
class SigmoidDerivative {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_sigmoidderivative<X, X>(d1);
    }
};

// min(1, max(0, 0.2*x + 0.5))
template <typename X>
class HardSigmoid {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_min<X>(static_cast<X>(1), nd4j::math::nd4j_max<X>(static_cast<X>(0), (static_cast<X>(0.2f)) * d1 + static_cast<X>(0.5f)));
    }
};

// 0.2 inside (-2.5, 2.5), 0 outside (matches HardSigmoid's slope).
template <typename X>
class HardSigmoidDerivative {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return d1 < static_cast<X>(-2.5f) || d1 > static_cast<X>(2.5f) ? static_cast<X>(0.f) : static_cast<X>(0.2f);
    }
};

/**
 * Scale to be between a min and max
 */
template <typename X>
class SetRange {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        auto min = params[0];
        auto max = params[1];
        if (static_cast<X>(d1) >= min && static_cast<X>(d1) <= max)
            return d1;
        // special case [0, 1]: squash via sigmoid before rescaling
        if (min == static_cast<X>(0) && max == static_cast<X>(1)) {
            auto val = static_cast<X>(1) / (static_cast<X>(1) + nd4j::math::nd4j_exp<X, X>(-d1));
            return (nd4j::math::nd4j_floor<X,X>(val * (max - min)) + min);
        }

        return (nd4j::math::nd4j_floor<X,X>(d1 * (max - min)) + min);
    }
};

template <typename X>
class Sin {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_sin<X,X>(d1);
    }
};

template <typename X>
class Square {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return d1 * d1;
    }
};

template <typename X, typename Z>
class Sqrt {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static Z op(X d1, Z *params) {
        return nd4j::math::nd4j_sqrt<X, Z>(d1);
    }
};

// 1 / sqrt(x)
template <typename X, typename Z>
class RSqrt {
public:
    no_op_exec_special
    no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
    return static_cast<Z>(1) / nd4j::math::nd4j_sqrt<X, Z>(d1);
}
};

template <typename X>
class Rint {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_rint<X,X>(d1);
    }
};

// softplus(x) = log(1 + e^x)
template <typename X>
class SoftPlus {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::softplus<X, X>(d1);
    }
};

// -1, 0, or +1 depending on the sign of d1.
template <typename X>
class Sign {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return (d1 > static_cast<X>(0)) - (d1 < static_cast<X>(0));
    }
};

// x * (1 - x)
template <typename X>
class TimesOneMinus {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return d1 * (static_cast<X>(1) - d1);
    }
};

// Rational approximation of 1.7159 * tanh(2x/3).
template <typename X>
class RationalTanh {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        // keep 2/3 as runtime variable, to match precision
        auto dis = (static_cast<X>(2) / static_cast<X>(3)) * d1;

        auto tanh = nd4j::math::nd4j_sgn<X,X>(dis) * (static_cast<X>(1) - (static_cast<X>(1) / (static_cast<X>(1) + static_cast<X>(nd4j::math::nd4j_abs<X>(dis)) + nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2)) + static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4)) )));
        return static_cast<X>(1.7159f) * tanh;
    }
};

template <typename X>
class RationalTanhDerivative {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        auto dis = (static_cast<X>(2.f) / static_cast<X>(3.f)) * d1;

        auto a = static_cast<X>(1.f) + nd4j::math::nd4j_abs<X>(dis) + nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2.f)) + static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4));

        auto tDeriv = (static_cast<X>(1.f) + nd4j::math::nd4j_sign<X,X>(dis) * (static_cast<X>(2.f) * dis + static_cast<X>(4.f) *
static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(3)))) / (a * a);

        return static_cast<X>(1.7159f) * (static_cast<X>(2.f) / static_cast<X>(3.f)) * tDeriv;
    }
};

template <typename X>
class Tanh {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_tanh<X, X>(d1);
    }
};

// max(0, tanh(x))
template <typename X>
class RectifiedTanh {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_max<X>(static_cast<X>(0), nd4j::math::nd4j_tanh<X,X>(d1));
    }
};

template <typename X>
class RectifiedTanhDerivative {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return d1 > static_cast<X>(0.f) ? nd4j::math::nd4j_tanhderivative<X,X>(d1) : static_cast<X>(0.f);
    }
};

template <typename X>
class ATanh {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_atanh<X,X>(d1);
    }
};

template <typename X>
class TanhDerivative {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_tanhderivative<X,X>(d1);
    }
};

// x^3
template <typename X>
class Cube {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return d1 * d1 * d1;
    }
};

// d/dx x^3 = 3x^2
template <typename X>
class CubeDerivative {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return static_cast<X>(3) * d1 * d1;
    }
};

template <typename X>
class ACos {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_acos<X, X>(d1);
    }
};

template <typename X>
class ASinh {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_asinh<X, X>(d1);
    }
};

template <typename X>
class ASinhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda

// d/dx asinh(x) = 1 / sqrt(x^2 + 1)
op_def static X op(X d1, X *params) {
    return static_cast<X>(1.f) / (nd4j::math::nd4j_sqrt<X, X>(nd4j::math::nd4j_pow<X, X, X>(d1, static_cast<X>(2.f)) + static_cast<X>(1.f)));
}
};

template <typename X>
class ACosh {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_acosh<X, X>(d1);
    }
};

// d/dx acosh(x) = 1 / (sqrt(x - 1) * sqrt(x + 1))
template <typename X>
class ACoshDerivative {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return static_cast<X>(1.f) / (nd4j::math::nd4j_sqrt<X, X>(d1 - static_cast<X>(1.f)) * nd4j::math::nd4j_sqrt<X, X>(d1 + static_cast<X>(1.f)));
    }
};

// Constant 1 regardless of input.
template <typename X>
class Ones {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return static_cast<X>(1.0f);
    }
};

template <typename X>
class SoftSign {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_softsign<X, X>(d1);
    }
};

template <typename X>
class SoftSignDerivative {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_softsignderivative<X,X>(d1);
    }
};

// Condition test; extraParams = {compare, eps, mode}.
template <typename X, typename Z>
class MatchConditionBool {
public:
    no_op_exec_special_bool
    no_op_exec_special_bool_cuda

    // this op return 1.0 if condition met, 0.0 otherwise
    op_def static Z op(X d1, X *extraParams) {
        X compare = extraParams[0];
        X eps = extraParams[1];

        auto mode = static_cast<int>(extraParams[2]);
        //nd4j_printf("value: %f; comp: %f; eps: %f; mode: %i;\n", d1, compare, eps, mode);

        switch (mode) {
            case 0: // equals
                return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps ? true : false;
            case 1: // not equals
                return nd4j::math::nd4j_abs<X>(d1 - compare) > eps ? true : false;
            case 2: // less_than
                return d1 < compare ? true : false;
            case 3: // greater_than
                return d1 > compare ?
true : false;
            case 4: // less_or_equals_than
                return d1 <= compare ? true : false;
            case 5: // greater_or_equals_than
                return d1 >= compare ? true : false;
            case 6: // abs_less_than
                return nd4j::math::nd4j_abs<X>(d1) < compare ? true : false;
            case 7: // abs_greater_than
                return nd4j::math::nd4j_abs<X>(d1) > compare ? true : false;
            case 8: // is inf
                return nd4j::math::nd4j_isinf(d1) ? true : false;
            case 9: // is nan
                return nd4j::math::nd4j_isnan(d1) ? true : false;
            case 10:
                return (d1 == compare) ? true : false;
            case 11:
                return (d1 != compare) ? true : false;
            case 12: // abs_greater_or_equals_than
                return nd4j::math::nd4j_abs<X>(d1) >= compare ? true : false;
            case 13: // abs_less_or_equals_than
                return nd4j::math::nd4j_abs<X>(d1) <= compare ? true : false;
            case 14: // isFinite
                return !(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1));
            case 15: // isInfinite
                return nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1);
            default:
                printf("Undefined match condition: [%i]\n", mode);
        }

        return d1;
    }
};

// Reduction counterpart of MatchConditionBool: counts how many elements match.
template <typename X, typename Z>
class MatchCondition {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    no_op_exec_special_accumulation_long
    no_op_exec_special_accumulation_cuda

    op_def static Z startingValue(const X *input) {
        return static_cast<Z>(0);
    }

    op_def static Z merge(Z old, Z opOutput, X *extraParams) {
        return old + opOutput;
    }

    op_def static Z update(Z old, Z opOutput, X *extraParams) {
        return old + opOutput;
    }

    // this op return 1.0 if condition met, 0.0 otherwise
    op_def static Z op(X d1, X *extraParams) {
        X compare = extraParams[0];
        X eps = extraParams[1];

        auto mode = static_cast<int>(extraParams[2]);
        //printf("value: %f; comp: %f; eps: %f; mode: %i;\n", (float) d1, (float) compare, (float) eps, mode);

        switch (mode) {
            case 0: // equals
                return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps ? 1 : 0;
            case 1: // not equals
                return nd4j::math::nd4j_abs<X>(d1 - compare) > eps ? 1 : 0;
            case 2: // less_than
                return d1 < compare ?
1 : 0;
            case 3: // greater_than
                return d1 > compare ? 1 : 0;
            case 4: // less_or_equals_than
                return d1 <= compare ? 1 : 0;
            case 5: // greater_or_equals_than
                return d1 >= compare ? 1 : 0;
            case 6: // abs_less_than
                return nd4j::math::nd4j_abs<X>(d1) < compare ? 1 : 0;
            case 7: // abs_greater_than
                return nd4j::math::nd4j_abs<X>(d1) > compare ? 1 : 0;
            case 8: // is inf
                return nd4j::math::nd4j_isinf(d1) ? 1 : 0;
            case 9: // is nan
                return nd4j::math::nd4j_isnan(d1) ? 1 : 0;
            case 10:
                return (d1 == compare) ? 1 : 0;
            case 11:
                return (d1 != compare) ? 1 : 0;
            case 12: // abs_greater_or_equals_than
                return nd4j::math::nd4j_abs<X>(d1) >= compare ? 1 : 0;
            case 13: // abs_less_or_equals_than
                return nd4j::math::nd4j_abs<X>(d1) <= compare ? 1 : 0;
            case 14: // isFinite
                return !(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1)) ? 1 : 0;
            case 15: // isInfinite
                return nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1) ? 1 : 0;
            default:
                printf("Undefined match condition: [%i]\n", mode);
        }

        return d1;
    }

    op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
        return reduction;
    }
};

template <typename X>
class ELU {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_elu<X,X>(d1);
    }
};

template <typename X>
class ELUDerivative {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_eluderivative<X,X>(d1);
    }
};

// max(d1, d2): d2 acts as the threshold/floor value.
template <typename X, typename Y, typename Z>
class RELU {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static Z op(X d1, Y d2, Z *params) {
        auto xt = static_cast<Z>(d1);
        auto xf = static_cast<Z>(d2);
        return xt < xf ?
xf : xt;
}
};

// Label/logit smoothing: d1 * (1 - d2) + 0.5 * d2.
template <typename X, typename Y, typename Z>
class SXELogitsSmoother {
public:
    op_def static Z op(X d1, Y d2, Z *params) {
        return d1 * ((X)1.f - (X) d2) + (X)(0.5f) * (X) d2;
    }
};

// RELU capped above at 6.
template <typename X, typename Y, typename Z>
class RELU6 {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static Z op(X d1, Y d2, Z *params) {
        auto relu = simdOps::RELU<X,Y,Z>::op(d1, d2, params);
        return relu < static_cast<Z>(6) ? relu : static_cast<Z>(6);
    }
};

template <typename X, typename Y, typename Z>
class LeakyRELU {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static Z op(X d1, Y d2, Z *params) {
        return nd4j::math::nd4j_leakyrelu<X,Z>(d1, d2);
    }
};

// Scaled exponential linear unit using SELU_LAMBDA / SELU_ALPHA constants.
template <typename X>
class SELU {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return d1 > static_cast<X>(0.0f) ? static_cast<X>(SELU_LAMBDA) * static_cast<X>(d1) : static_cast<X>(SELU_LAMBDA) * (static_cast<X>(SELU_ALPHA) * nd4j::math::nd4j_exp<X, X>(d1) - static_cast<X>(SELU_ALPHA));
    }
};

template <typename X>
class SELUDerivative {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return d1 > static_cast<X>(0.f) ?
static_cast<X>(SELU_LAMBDA) : static_cast<X>(SELU_ALPHA) * static_cast<X>(SELU_LAMBDA) * nd4j::math::nd4j_exp<X, X>(d1);
}
};

// Slope d2 is applied on the negative side only.
template <typename X, typename Y, typename Z>
class LeakyRELUDerivative {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static Z op(X d1, Y d2, Z *params) {
        if (d1 >= static_cast<X>(0))
            return static_cast<Z>(1);
        else
            return static_cast<Z>(d2);
    }
};

template <typename X>
class ASin {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_asin<X,X>(d1);
    }
};

template <typename X>
class Sinh {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_sinh<X,X>(d1);
    }
};

// d/dx sinh(x) = cosh(x)
template <typename X>
class SinhDerivative {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_cosh<X, X>(d1);
    }
};

template <typename X>
class Cosh {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_cosh<X,X>(d1);
    }
};

template <typename X>
class Tan {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_tan<X,X>(d1);
    }
};

// d/dx tan(x) = 1 / cos(x)^2
template <typename X>
class TanDerivative {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return static_cast<X>(1.f) / nd4j::math::nd4j_pow<X, X, X>(nd4j::math::nd4j_cos<X, X>(d1), static_cast<X>(2.0f));
    }
};

template <typename X>
class ATan {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_atan<X, X>(d1);
    }
};

// Note the argument order: computes atan2(d2, d1).
template <typename X, typename Y, typename Z>
class Atan2 {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static Z op(X d1, Y d2) {
        return nd4j::math::nd4j_atan2<X, Z>(d2, d1);
    }

    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }

    // op for MetaOps
    op_def static Z op(X d1, Y *params) {
        return op(d1, params[0]);
    }
};

template <typename X>
class Identity {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return d1;
    }
};

// Clamp d1 * k away from the MIN_CUTFOFF band, rescaled back by k.
template <typename X>
class Stabilize {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        X k = params[0];
        if (d1 * k > static_cast<X>(- MIN_CUTFOFF))
            return static_cast<X>(- MIN_CUTFOFF) / k;
        else if (d1 * k < static_cast<X>(MIN_CUTFOFF))
            return static_cast<X>(MIN_CUTFOFF) / k;
        return d1;
    }
};

// Step function w.r.t. threshold d2: 1 when d1 > d2, else 0.
template <typename X, typename Y, typename Z>
class Step {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static Z op(X d1, Y d2, Z *params) {
        return (d1 > static_cast<X>(d2) ? static_cast<Z>(1) : static_cast<Z>(0));
    }
};

// 1 - x
template <typename X>
class OneMinus {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return static_cast<X>(1) - d1;
    }
};

template <typename X>
class Sum {
public:
    no_op_exec_special_accumulation_same
    no_op_exec_special_accumulation_same_cuda

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0.0f);
    }

    op_def static X merge(X old, X opOutput, X *extraParams) {
        return opOutput + old;
    }

    op_def static X update(X old, X opOutput, X *extraParams) {
        return opOutput + old;
    }

    op_def static X op(X d1, X *extraParams) {
        return d1;
    }

    op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return reduction;
    }
};

// Entropy over squared values: -sum(p^2 * log(p^2)).
template <typename X, typename Z>
class ShannonEntropy {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(X old, X opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(X old, X opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z op(X d1, Z *extraParams) {
        return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2)) *
// Reduction functors (accumulate partials via merge/update, finalize in
// postProcess): ShannonEntropy tail (-sum p^2 * log p^2); LogEntropy
// (log(-sum p * log p)); Entropy (-sum p * log p); head of ASum (sum of |x|).
nd4j::math::nd4j_log<X, Z>(nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2.0f))); } op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) { return -reduction; } }; template <typename X, typename Z> class LogEntropy { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(X old, X opOutput, Z *extraParams) { return opOutput + old; } op_def static Z update(X old, X opOutput, Z *extraParams) { return opOutput + old; } op_def static Z op(X d1, Z *extraParams) { return static_cast<Z>(d1) * nd4j::math::nd4j_log<X, Z>(d1); } op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) { //entropy is -sum(p(x) * log(p(x))); log entropy is log of this return nd4j::math::nd4j_log<X, Z>(-reduction); } }; template <typename X, typename Z> class Entropy { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(X old, X opOutput, Z *extraParams) { return opOutput + old; } op_def static Z update(X old, X opOutput, Z *extraParams) { return opOutput + old; } op_def static Z op(X d1, Z *extraParams) { return static_cast<Z>(d1) * nd4j::math::nd4j_log<X, Z>(d1); } op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) { return static_cast<Z>(-reduction); //entropy is -sum(p(x) * log(p(x))) } }; template <typename X> class ASum { public: no_op_exec_special_accumulation_same no_op_exec_special_accumulation_same_cuda op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static X merge(X old, X opOutput, X *extraParams) { return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old); } op_def static X update(X old, X opOutput, X *extraParams) { return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old); } op_def static X op(X d1, X *extraParams) { return 
// ASum tail; CountNonZero / CountZero: Z-typed (long) counters over the input
// (per-element op maps to 0/1, merge/update add the counts).
nd4j::math::nd4j_abs<X>(d1); } op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) { return nd4j::math::nd4j_abs<X>(reduction); } }; template <typename X, typename Z> class CountNonZero { public: no_op_exec_special_accumulation_long no_op_exec_special_accumulation_cuda op_def static Z startingValue(const X *input) { return static_cast<Z>(0); } op_def static Z merge(Z old, Z opOutput, X *extraParams) { return opOutput + old; } op_def static Z update(Z old, Z opOutput, X *extraParams) { return opOutput + old; } op_def static Z op(X d1, X *extraParams) { return d1 == static_cast<X>(0.0f) ? static_cast<Z>(0.0f) : static_cast<Z>(1.0f); } op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) { return reduction; } }; template <typename X, typename Z> class CountZero { public: no_op_exec_special_accumulation_long no_op_exec_special_accumulation_cuda op_def static Z startingValue(const X *input) { return static_cast<Z>(0.0f); } op_def static Z merge(Z old, Z opOutput, X *extraParams) { return opOutput + old; } op_def static Z update(Z old, Z opOutput, X *extraParams) { return opOutput + old; } op_def static Z op(X d1, X *extraParams) { return d1 == static_cast<X>(0) ? 
// CountZero tail (op casts through X before the implicit conversion to Z —
// matches upstream); Prod (multiplicative reduction, identity 1); Any
// (postProcess: 1 if the additive reduction is > 0); head of All
// (multiplicative: stays nonzero only if every element is nonzero).
static_cast<X>(1) : static_cast<X>(0); } op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) { return static_cast<Z>(reduction); } }; template <typename X> class Prod { public: no_op_exec_special_accumulation_same no_op_exec_special_accumulation_same_cuda op_def static X startingValue(const X *input) { return static_cast<X>(1); } op_def static X merge(X old, X opOutput, X *extraParams) { return opOutput * old; } op_def static X update(X old, X opOutput, X *extraParams) { return opOutput * old; } op_def static X op(X d1, X *extraParams) { return d1; } op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) { return reduction; } }; template <typename X, typename Z> class Any { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static X startingValue(const X *input) { return static_cast<X>(0.0f); } op_def static Z merge(X old, X opOutput, X *extraParams) { return opOutput + old; } op_def static Z update(X old, X opOutput, X *extraParams) { return opOutput + old; } op_def static Z op(X d1, X *extraParams) { return d1; } op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) { return reduction > static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0) ; } }; template <typename X, typename Z> class All { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static X startingValue(const X *input) { return static_cast<X>(1); } op_def static Z merge(X old, X opOutput, X *extraParams) { return opOutput * old; } op_def static Z update(X old, X opOutput, X *extraParams) { return opOutput * old; } op_def static Z op(X d1, X *extraParams) { return d1; } op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) { return reduction > static_cast<X>(0) ? 
// All tail; Mean (sum / n); AMean (mean of absolute values — NOTE(review):
// merge/update re-apply nd4j_abs to already-accumulated partials; harmless for
// non-negative partial sums but worth confirming upstream); head of Max
// (running maximum, seeded from input[0]).
static_cast<Z>(1) : static_cast<Z>(0); } }; template <typename X, typename Z> class Mean { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(X old, X opOutput, Z *extraParams) { return opOutput + old; } op_def static Z update(X old, X opOutput, Z *extraParams) { return opOutput + old; } op_def static Z op(X d1, Z *extraParams) { return d1; } op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) { return (Z) reduction / (Z) n; } }; template <typename X, typename Z> class AMean { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(X old, X opOutput, Z *extraParams) { return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old); } op_def static X update(X old, X opOutput, Z *extraParams) { return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old); } op_def static Z op(X d1, Z *extraParams) { return nd4j::math::nd4j_abs<X>(d1); } op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) { return nd4j::math::nd4j_abs<X>(reduction) / static_cast<X>(n); } }; template <typename X> class Max { public: no_op_exec_special_accumulation_same no_op_exec_special_accumulation_same_cuda op_def static X startingValue(const X *input) { return input[0]; } op_def static X merge(X old, X opOutput, X *extraParams) { return nd4j::math::nd4j_max<X>(old, opOutput); } op_def static X update(X old, X opOutput, X *extraParams) { return nd4j::math::nd4j_max<X>(opOutput, old); } op_def static X op(X d1, X d2, X *params) { return nd4j::math::nd4j_max<X>(d1, d2); } op_def static X op(X d1, X d2) { return nd4j::math::nd4j_max<X>(d1, d2); } // FIXME: this signature overlaps with MetaOp op_def static X op(X d1, X *extraParams) { return d1; } op_def static X postProcess(X reduction, Nd4jLong n, X 
// Max tail; pairwise binary functors: AMaxPairwise / AMinPairwise select the
// operand with the larger / smaller absolute value (result cast to Z first),
// MaxPairwise / MinPairwise compare by value; head of AMax (reduction keeping
// the maximum absolute value, seeded from input[0]).
*extraParams) { return reduction; } }; template <typename X, typename Y, typename Z> class AMaxPairwise { public: op_def static Z op(X d1, Y d2, Z *params) { return op(d1, d2); } op_def static Z op(X d1, Y d2) { auto z1 = static_cast<Z>(d1); auto z2 = static_cast<Z>(d2); if (nd4j::math::nd4j_abs<Z>(z1) > nd4j::math::nd4j_abs<Z>(z2)) return z1; else return z2; } }; template <typename X, typename Y, typename Z> class AMinPairwise { public: op_def static Z op(X d1, Y d2, Z *params) { return op(d1, d2); } op_def static Z op(X d1, Y d2) { auto z1 = static_cast<Z>(d1); auto z2 = static_cast<Z>(d2); if (nd4j::math::nd4j_abs<Z>(z1) < nd4j::math::nd4j_abs<Z>(z2)) return z1; else return z2; } }; template <typename X, typename Y, typename Z> class MaxPairwise { public: op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_max<Z>(static_cast<Z>(d1), static_cast<Z>(d2)); } op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_max<Z>(static_cast<Z>(d1), static_cast<Z>(d2)); } }; template <typename X, typename Y, typename Z> class MinPairwise { public: op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_min<Z>(static_cast<Z>(d1), static_cast<Z>(d2)); } op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_min<Z>(static_cast<Z>(d1), static_cast<Z>(d2)); } }; template <typename X> class AMax { public: no_op_exec_special_accumulation_same no_op_exec_special_accumulation_same_cuda op_def static X startingValue(const X *input) { return input[0]; } op_def static X merge(X old, X opOutput, X *extraParams) { return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput)); } op_def static X update(X old, X opOutput, X *extraParams) { return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(opOutput), nd4j::math::nd4j_abs<X>(old)); } op_def static X op(X d1, X d2, X *params) { return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2)); } op_def static X op(X d1, X d2) { return 
// AMax tail (binary op returns the signed operand with larger |value|);
// AMin (minimum absolute value); head of Min (running minimum).
nd4j::math::nd4j_abs<X>(d1) > nd4j::math::nd4j_abs<X>(d2) ? d1 : d2; } // FIXME: this signature overlaps with MetaOp op_def static X op(X d1, X *extraParams) { return nd4j::math::nd4j_abs<X>(d1); } op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) { return nd4j::math::nd4j_abs<X>(reduction); } }; template <typename X> class AMin { public: no_op_exec_special_accumulation_same no_op_exec_special_accumulation_same_cuda op_def static X startingValue(const X *input) { return input[0]; } op_def static X merge(X old, X opOutput, X *extraParams) { return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput)); } op_def static X update(X old, X opOutput, X *extraParams) { return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(opOutput), nd4j::math::nd4j_abs<X>(old)); } op_def static X op(X d1, X d2, X *params) { return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2)); } op_def static X op(X d1, X d2) { return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2)); } // FIXME: this signature overlaps with MetaOp op_def static X op(X d1, X *extraParams) { return nd4j::math::nd4j_abs<X>(d1); } op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) { return nd4j::math::nd4j_abs<X>(reduction); } }; template <typename X> class Min { public: no_op_exec_special_accumulation_same no_op_exec_special_accumulation_same_cuda op_def static X startingValue(const X *input) { return input[0]; } op_def static X merge(X old, X opOutput, X *extraParams) { return nd4j::math::nd4j_min<X>(old, opOutput); } op_def static X update(X old, X opOutput, X *extraParams) { return nd4j::math::nd4j_min<X>(opOutput, old); } op_def static X op(X d1, X d2, X *params) { return nd4j::math::nd4j_min<X>(d1, d2); } op_def static X op(X d1, X d2) { return nd4j::math::nd4j_min<X>(d1, d2); } // FIXME: this signature overlaps with MetaOp op_def static X op(X d1, X *extraParams) { return d1; } 
// Min tail; norm reductions: Norm1 (sum |x|), Norm2 (sqrt of sum x^2 in
// postProcess), SquaredNorm (sum x^2), head of NormFrobenius (same result as
// Norm2, computed via |x|^2).
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) { return reduction; } }; template <typename X, typename Z> class Norm1 { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(X old, X opOutput, Z *extraParams) { return opOutput + old; } op_def static Z update(X old, X opOutput, Z *extraParams) { return opOutput + old; } op_def static Z op(X d1, Z *extraParams) { return static_cast<Z>(nd4j::math::nd4j_abs<X>(d1)); } op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) { return static_cast<Z>(reduction); } }; template <typename X, typename Z> class Norm2 { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(X old, X opOutput, Z *extraParams) { return opOutput + old; } op_def static Z update(X old, X opOutput, Z *extraParams) { return opOutput + old; } op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) { return nd4j::math::nd4j_sqrt<X, Z>(reduction); } op_def static Z op(X d1, Z *extraParams) { return static_cast<Z>(d1 * d1); } }; template <typename X, typename Z> class SquaredNorm { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(X old, X opOutput, Z *extraParams) { return opOutput + old; } op_def static Z update(X old, X opOutput, Z *extraParams) { return opOutput + old; } op_def static Z op(X d1, Z *extraParams) { return static_cast<Z>(d1 * d1); } op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) { return static_cast<Z>(reduction); } }; template <typename X, typename Z> class NormFrobenius { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static X startingValue(const X *input) { return 
// NormFrobenius tail; NormP (p-norm: sum |x|^p, then ^(1/p), with p read from
// extraParams[0]); NormMax — NOTE(review): its postProcess computes
// nd4j_max(|reduction|, |reduction|), i.e. both arguments are the same value;
// redundant but behavior-preserving, and it matches upstream, so it is left
// untouched. Head of Variance follows.
static_cast<X>(0); } op_def static Z merge(X old, X opOutput, Z *extraParams) { return opOutput + old; } op_def static Z update(X old, X opOutput, Z *extraParams) { return opOutput + old; } op_def static Z op(X d1, Z *extraParams) { X v = nd4j::math::nd4j_abs<X>(d1); return static_cast<Z>(v * v); } op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) { return nd4j::math::nd4j_sqrt<X, Z>(reduction); } }; template <typename X, typename Z> class NormP { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(X old, X opOutput, Z *extraParams) { return opOutput + old; } op_def static Z update(X old, X opOutput, Z *extraParams) { return opOutput + old; } op_def static Z op(X d1, Z *extraParams) { return nd4j::math::nd4j_pow<X, Z, Z>(nd4j::math::nd4j_abs<X>(d1), extraParams[0]); } op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) { return nd4j::math::nd4j_pow<X, Z, Z>(reduction, static_cast<Z>(1.0f) / extraParams[0]); } }; template <typename X, typename Z> class NormMax { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(X old, X opOutput, Z *extraParams) { return opOutput + old; } op_def static Z update(X old, X opOutput, Z *extraParams) { return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput)); } op_def static Z op(X d1, Z *extraParams) { return d1; } op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) { return static_cast<Z>(nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(reduction), nd4j::math::nd4j_abs<X>(reduction))); } }; template <typename X, typename Z> class Variance { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static X startingValue(const X *input) { return static_cast<X>(0.0f); } op_def 
// Variance: sums squared deviations from a precomputed mean (extraParams[0]) and
// divides by n-1 in postProcess — sample variance with Bessel's correction (the
// commented-out line shows an older bias-corrected formula). StandardDeviation
// reuses Variance::postProcess and takes sqrt. CosineSimilarity accumulates the
// two squared norms in extraParams[0]/[1] while the reduction value carries the
// dot product; extraParamsLen = 2 declares that side-channel.
static Z merge(X old, X opOutput, Z *extraParams) { return old + opOutput; } op_def static Z update(X old, X opOutput, Z *extraParams) { return old + opOutput; } op_def static X op(X d1, Z *extraParams) { X mean = static_cast<X>(extraParams[0]); X ret = d1 - mean; return ret * ret; } op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) { // T bias = extraParams[1]; // return (reduction - (nd4j::math::nd4j_pow<T>(bias, static_cast<T>(2.0f)) / static_cast<T>(n))) / (n - 1) return static_cast<Z>(reduction) / static_cast<Z>(n - 1); } }; /** * Standard deviation of a buffer */ template <typename X, typename Z> class StandardDeviation { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static X startingValue(const X *input) { return static_cast<X>(0.0f); } op_def static Z merge(X old, X opOutput, Z *extraParams) { return old + opOutput; } op_def static Z update(X old, X opOutput, Z *extraParams) { return old + opOutput; } op_def static Z op(X d1, Z *extraParams) { X mean = extraParams[0]; X ret = d1 - mean; return ret * ret; } op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) { Z ret = Variance<X,Z>::postProcess(reduction, n, extraParams); Z sqrtRet = nd4j::math::nd4j_sqrt<X, Z>(ret); return sqrtRet; } }; template <typename X, typename Y> class CosineSimilarity { public: static const int extraParamsLen = 2; op_def static X *generateExtraParams() { //T *extraParams = new T[2]; return nullptr; } op_def static void finalizeExtraParams(X *extraParams) { //delete[] extraParams; } op_def static Y startingValue(const X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) { return reduction / (nd4j::math::nd4j_sqrt<Y, Y>(extraParams[0]) * nd4j::math::nd4j_sqrt<Y, Y>(extraParams[1])); } op_def static Y op(X d1, X d2, Y *extraParams) { extraParams[0] += static_cast<Y>(d1 * d1); extraParams[1] += static_cast<Y>(d2 * d2); return static_cast<Y>(d1 * d2); } 
// CosineSimilarity's CUDA path (opAtomic uses nd4j_atomicAdd so concurrent
// threads can update the shared norm accumulators). JaccardDistance:
// 1 - sum(min(d1,d2)) / sum(max(d1,d2)), with numerator/denominator carried in
// extraParams[0]/[1] and the reduction value itself unused (op returns 0).
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) { extraParamsTotal[0] += extraParamsLocal[0]; extraParamsTotal[1] += extraParamsLocal[1]; } #ifdef __CUDACC__ static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) { nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],static_cast<Y>(d1 * d1)); nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1],static_cast<Y>(d2 * d2)); return static_cast<Y>(d1 * d2); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParams) { return old + opOutput; } op_def static Y merge(Y old, Y opOutput, Y *extraParams) { return update(old, opOutput, extraParams); } }; template <typename X, typename Y> class JaccardDistance { public: static const int extraParamsLen = 2; op_def static X *generateExtraParams() { //T *extraParams = new T[2]; return nullptr; } op_def static void finalizeExtraParams(X *extraParams) { //delete[] extraParams; } op_def static Y startingValue(const X *input) { return static_cast<X>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) { // num / denom return (static_cast<Y>(1.0f)) - (extraParams[0] / extraParams[1]); } op_def static Y num(X d1, X d2) { return nd4j::math::nd4j_min<X>(d1, d2); } op_def static Y denom(X d1, X d2) { return nd4j::math::nd4j_max<X>(d1, d2); } op_def static Y op(X d1, X d2, Y *extraParams) { extraParams[0] += static_cast<Y>(num(d1, d2)); extraParams[1] += static_cast<Y>(denom(d1, d2)); return static_cast<Y>(0.0f); } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) { extraParamsTotal[0] += extraParamsLocal[0]; extraParamsTotal[1] += extraParamsLocal[1]; } #ifdef __CUDACC__ __device__ static inline Y opAtomic(X d1, X d2, Y *extraParams) { nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],num(d1, d2)); nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], denom(d1, d2)); return static_cast<Y>(0.0f); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParams) { return old + opOutput; } 
// JaccardDistance tail; SimpleHammingDistance (fraction of mismatching
// positions: per-pair 0/1, averaged over n in postProcess); head of
// CosineDistance (1 - cosine similarity, norms built from |d1|, |d2|).
op_def static Y merge(Y old, Y opOutput, Y *extraParams) { return update(old, opOutput, extraParams); } }; template <typename X, typename Y> class SimpleHammingDistance { public: static const int extraParamsLen = 0; op_def static X *generateExtraParams() { //T *extraParams = new T[2]; return nullptr; } op_def static void finalizeExtraParams(X *extraParams) { //delete[] extraParams; } op_def static Y startingValue(X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) { return static_cast<Y>(reduction / n); } op_def static Y op(X d1, X d2, Y *extraParams) { return (d1 == d2) ? static_cast<Y>(0.0f) : static_cast<Y>(1.0f); } op_def static void aggregateExtraParams(X *extraParamsTotal, X *extraParamsLocal) { } #ifdef __CUDACC__ __device__ static inline Y opAtomic(X d1, X d2, Y *extraParams) { return op(d1, d2, extraParams); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParams) { return old + opOutput; } op_def static Y merge(Y old, Y opOutput, Y *extraParams) { return update(old, opOutput, extraParams); } }; template <typename X, typename Y> class CosineDistance { public: static const int extraParamsLen = 2; op_def static X *generateExtraParams() { //T *extraParams = new T[2]; return nullptr; } op_def static void finalizeExtraParams(X *extraParams) { //delete[] extraParams; } op_def static Y startingValue(X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) { return (static_cast<Y>(1.0f)) - (reduction / (nd4j::math::nd4j_sqrt<Y, Y>(extraParams[0]) * nd4j::math::nd4j_sqrt<Y, Y>(extraParams[1]))); } op_def static Y op(X d1, X d2, Y *extraParams) { extraParams[0] += static_cast<Y>(nd4j::math::nd4j_abs<X>(d1) * nd4j::math::nd4j_abs<X>(d1)); extraParams[1] += static_cast<Y>(nd4j::math::nd4j_abs<X>(d2) * nd4j::math::nd4j_abs<X>(d2)); return (d1 * d2); } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) { 
// CosineDistance tail (CUDA atomic accumulation of the two norms); Dot (plain
// inner product, no extra params); head of EqualsWithEps — NOTE(review): its
// op() reads extraParamsRef[2] as eps even though extraParamsLen is declared 0,
// and it counts *non*-equal pairs (!nd4j_eq) despite the "check equality"
// comment; both quirks match upstream — confirm before changing.
extraParamsTotal[0] += extraParamsLocal[0]; extraParamsTotal[1] += extraParamsLocal[1]; } #ifdef __CUDACC__ static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) { nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0], nd4j::math::nd4j_abs<Y>(d1) * nd4j::math::nd4j_abs<Y>(d1)); nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], nd4j::math::nd4j_abs<Y>(d2) * nd4j::math::nd4j_abs<Y>(d2)); return (d1 * d2); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParams) { return old + opOutput; } op_def static Y merge(Y old, Y opOutput, Y *extraParams) { return update(old, opOutput, extraParams); } }; /** * Dot product between 2 arrays */ template <typename X, typename Y> class Dot { public: static const int extraParamsLen = 0; op_def static X * generateExtraParams() { return nullptr; } op_def static void finalizeExtraParams(X *extraParamsRef) { //no-op //delete[] * extraParamsRef; } op_def static Y startingValue(X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) { return reduction; } op_def static Y op(X d1, X d2, Y *extraParamsRef) { return static_cast<Y>(d1 * d2); } #ifdef __CUDACC__ __device__ static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) { return op(d1, d2, extraParamsRef); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) { return opOutput + old; } op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) { return update(old, opOutput, extraParamsRef); } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {} }; /** * Op to check equality within arrays */ template <typename X, typename Z> class EqualsWithEps { public: static const int extraParamsLen = 0; op_def static X * generateExtraParams() { return nullptr; } op_def static void finalizeExtraParams(X *extraParamsRef) { //no-op } op_def static Z startingValue(X *input) { return static_cast<Z>(0.0f); } op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParamsRef) { 
// EqualsWithEps tail (note merge() takes X old but forwards to update(Z, Z, ...) —
// implicit conversion, matches upstream); EuclideanDistance (sqrt of the sum of
// squared differences); head of ManhattanDistance (sum of |d1 - d2|).
return reduction; } op_def static Z op(X d1, X d2, Z *extraParamsRef) { double eps = nd4j::math::nd4j_abs<double>(extraParamsRef[2]); return static_cast<Z>(!nd4j::math::nd4j_eq<X>(d1, d2, eps)); } #ifdef __CUDACC__ __device__ static inline Z opAtomic(X d1, X d2, Z *extraParamsRef) { return op(d1, d2, extraParamsRef); } #endif op_def static Z update(Z old, Z opOutput, Z *extraParamsRef) { return opOutput + old; } op_def static Z merge(X old, Z opOutput, Z *extraParamsRef) { return update(old, opOutput, extraParamsRef); } op_def static void aggregateExtraParams(Z *extraParamsTotal, Z *extraParamsLocal) {} }; template <typename X, typename Y> class EuclideanDistance { public: static const int extraParamsLen = 0; op_def static X * generateExtraParams() { return nullptr; } op_def static void finalizeExtraParams(X *extraParamsRef) { //no-op } op_def static Y startingValue(X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) { return nd4j::math::nd4j_sqrt<Y, Y>(reduction); } op_def static Y op(X d1, X d2, Y *extraParamsRef) { X ret = d1 - d2; return static_cast<Y>(ret * ret); } #ifdef __CUDACC__ __device__ static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) { return op(d1, d2, extraParamsRef); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) { return opOutput + old; } op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) { return update(old, opOutput, extraParamsRef); } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {} }; template <typename X, typename Y> class ManhattanDistance { public: static const int extraParamsLen = 0; op_def static X * generateExtraParams() { return nullptr; } op_def static void finalizeExtraParams(X *extraParamsRef) { //no-op } op_def static Y startingValue(X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) { return reduction; } op_def static Y op(X d1, X d2, 
// ManhattanDistance tail; head of IndexAbsoluteMax: an index-reduce functor
// tracking the position of the largest |value| — note update() mutates both
// operands' .value fields to their absolute values before comparing, and the
// CUDA-only tie-break on index works around a race at merge phase.
Y *extraParamsRef) { return nd4j::math::nd4j_abs<X>(d1 - d2); } op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) { return old + opOutput; } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) { } #ifdef __CUDACC__ __device__ static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) { return op(d1, d2, extraParamsRef); } #endif #ifndef __clang__ #pragma omp declare simd uniform(extraParamsRef) #endif op_def static Y merge(X old, X opOutput, X *extraParamsRef) { return update(old, opOutput, extraParamsRef); } }; template <typename X> class IndexAbsoluteMax { public: static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) { return nd4j::math::nd4j_abs<X>(val); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) { opOutput.value = nd4j::math::nd4j_abs<X>(opOutput.value); old.value = nd4j::math::nd4j_abs<X>(old.value); if (opOutput.value > old.value) return opOutput; #ifdef __CUDACC__ // workaround for cuda race condition at merge phase else if (opOutput.value == old.value && opOutput.index < old.index) return opOutput; #elif defined(__GNUC__) #endif return old; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (nd4j::math::nd4j_abs<X>(f1.value) > nd4j::math::nd4j_abs<X>(f2.value)) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } static _CUDA_HD inline X startingValue(X *input) { return 0; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value 
// IndexAbsoluteMax tail; FirstIndex / LastIndex: index-reduce driven by
// simdOps::MatchCondition on the value, tracking the smallest / largest matching
// index. Index -1 marks "nothing matched yet" (startingIndexValue), and the
// CUDA path skips partials with a negative index.
= startingValue(input); local.index = 0; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } }; template <typename X> class FirstIndex { public: static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) { return val; } static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) { #ifdef __CUDACC__ if (opOutput.index < 0) return old; #endif auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams); //printf("res: %f; oldIdx: %i; newIdx: %i\n", res, old.index, opOutput.index); if (res == static_cast<X>(0)) return old; if (old.index < 0) return opOutput; if (old.index > opOutput.index) return opOutput; return old; } static _CUDA_HD inline X startingValue(X *input) { return -nd4j::DataTypeUtils::max<X>(); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = -1; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (f1.index > f2.index) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } }; template <typename X> class LastIndex { public: static _CUDA_HD inline functions::indexreduce::IndexValue<X> 
// LastIndex continues; then IndexMax — NOTE(review): its merge() returns f2
// when f1.value > f2.value (the other Index* merges follow the same shape),
// which reads inverted relative to update()'s comparison; this matches the
// upstream sources, so it is deliberately left untouched — confirm intent
// before "fixing".
op(functions::indexreduce::IndexValue<X> val, X *extraParams) { return val; } static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) { #ifdef __CUDACC__ if (opOutput.index < 0) return old; #endif auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams); if (res == static_cast<X>(0)) return old; if (old.index < 0) return opOutput; if (old.index < opOutput.index) return opOutput; return old; } static _CUDA_HD inline X startingValue(X *input) { return -nd4j::DataTypeUtils::max<X>(); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = -1; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (f1.index < f2.index) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } }; template <typename X> class IndexMax { public: static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) { return val; } static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) { if (opOutput.value > old.value) { return opOutput; } #ifdef __CUDACC__ // workaround for cuda race condition at merge phase else if (opOutput.value == old.value && opOutput.index < old.index) return 
// IndexMax tail; IndexAbsoluteMin: index of the smallest |value|, seeded with
// DataTypeUtils::max<X>() — update() again mutates operand .value fields to
// their absolute values before comparing.
opOutput; #elif defined(__GNUC__) #endif return old; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (f1.value > f2.value) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } static _CUDA_HD inline X startingValue(X *input) { return -nd4j::DataTypeUtils::max<X>(); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = 0; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } }; template <typename X> class IndexAbsoluteMin { public: static _CUDA_HD inline functions::indexreduce::IndexValue<X> op( functions::indexreduce::IndexValue<X> val, X *extraParams) { return val; } static _CUDA_HD inline X startingValue(X *input) { return nd4j::DataTypeUtils::max<X>(); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = 0; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) { opOutput.value = nd4j::math::nd4j_abs<X>(opOutput.value); old.value = nd4j::math::nd4j_abs<X>(old.value); if (opOutput.value < old.value) return opOutput; #ifdef __CUDACC__ // workaround for cuda race condition at merge phase else if (opOutput.value == old.value && opOutput.index < old.index) return opOutput; #elif defined(__GNUC__) #endif return old; } static 
// IndexAbsoluteMin tail; head of IndexMin (index of the smallest value) —
// its definition continues past the end of this chunk.
_CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (nd4j::math::nd4j_abs<X>(f1.value) < nd4j::math::nd4j_abs<X>(f2.value)) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } }; template <typename X> class IndexMin { public: static _CUDA_HD inline functions::indexreduce::IndexValue<X> op( functions::indexreduce::IndexValue<X> val, X *extraParams) { return val; } static _CUDA_HD inline X startingValue(X *input) { return nd4j::DataTypeUtils::max<X>(); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = 0; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) { if (opOutput.value < old.value) return opOutput; #ifdef __CUDACC__ // workaround for cuda race condition at merge phase else if (opOutput.value == old.value && opOutput.index < old.index) return opOutput; #elif defined(__GNUC__) #endif return old; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (f1.value < f2.value) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } 
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } }; template <typename X, typename Z> class SummaryStatsVariance { public: static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) { if (biasCorrected) { Z ret = static_cast<Z>(val.varianceBiasCorrected()); if (ret < static_cast<Z>(0.0f)) return static_cast<Z>(val.variance()); return ret; } return static_cast<Z>(val.variance()); } static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) { return d1; } }; template <typename X, typename Z> class SummaryStatsStandardDeviation { public: static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) { if (biasCorrected) { auto ret = static_cast<Z>(val.varianceBiasCorrected()); if (ret < static_cast<Z>(0.0f)) return nd4j::math::nd4j_sqrt<double, Z>(val.variance()); else return nd4j::math::nd4j_sqrt<double, Z>(ret); } return nd4j::math::nd4j_sqrt<double, Z>(val.variance()); } static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) { return d1; } }; template <typename X> class DropOut { public: no_op_exec_special_same no_op_exec_special_same_cuda inline _CUDA_D static X op(X d1, X *params) { X prob = params[0]; #ifdef __CUDACC__ X length = params[1]; X tid = gridDim.x * blockDim.x + threadIdx.x; X rnd = nd4j::math::nd4j_abs<X>(nd4j::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid))); #else X rnd = static_cast<X>(rand() / RAND_MAX); #endif return rnd >= prob ? 
static_cast<X>(0.0f) : d1; } }; template <typename X, typename Y, typename Z> class DropOutInverted { public: no_op_exec_special no_op_exec_special_cuda #ifdef __CUDACC__ __device__ #endif inline static Z op(X d1, Y d2, Z *params) { Y prob = d2; #ifdef __CUDACC__ X length = params[1]; X tid = gridDim.x * blockDim.x + threadIdx.x; X rnd = nd4j::math::nd4j_abs<X>(nd4j::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid))); #else X rnd = static_cast<X>(rand() / RAND_MAX); #endif return rnd >= static_cast<X>(prob) ? static_cast<Z>(0.0f) : reinterpret_cast<Z>(d1 / static_cast<X>(prob)); } }; template <typename X, typename Y, typename Z> class ReplaceNans { public: no_op_exec_special no_op_exec_special_cuda op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_isnan(d1) ? static_cast<Z>(d2) : static_cast<Z>(d1) ; } }; // this op is used for conditional pairwise transforms only template <typename X, typename Y, typename Z> class CompareAndReplace{ public: // op definition for PairWise Transform op_def static Z op(X d1, Y d2, Z *params) { auto zd1 = static_cast<Z>(d1); auto zd2 = static_cast<Z>(d2); auto compare = params[0]; auto eps = params[2]; int mode = (int) params[3]; if (mode == 0) // equals if (nd4j::math::nd4j_abs<Z>(zd1 - compare) <= eps) return zd2; else return zd1; else if (mode == 1) // not equals eps if (nd4j::math::nd4j_abs<Z>(zd1 - compare) > eps) return zd2; else return zd1; else if (mode == 2) // less_than eps if (zd1 < compare) return zd2; else return zd1; else if (mode ==3) // greater_than if (zd1 > compare) return zd2; else return zd1; else if (mode == 4) // less_or_equals_than if (zd1 <= compare) return zd2; else return zd1; else if (mode == 5) // greater_or_equals_than if (zd1 >= compare) return zd2; else return zd1; else if (mode == 6) // abs_less_than if (nd4j::math::nd4j_abs<Z>(zd1) < compare) return zd2; else return zd1; else if (mode == 7) // abs_greater_than if 
(nd4j::math::nd4j_abs<Z>(zd1) > compare) return zd2; else return zd1; else if (mode == 8) // is inf if (nd4j::math::nd4j_isinf(zd1)) return zd2; else return zd1; else if (mode == 9) // is nan if (nd4j::math::nd4j_isnan(zd1)) return zd2; else return zd1; else if (mode == 10) if (zd1 == compare) return zd2; else return zd1; else if (mode == 11) if (zd1 != compare) return zd2; else return zd1; else if (mode == 12) // abs_greater_or_equals_than if (nd4j::math::nd4j_abs<Z>(zd1) >= compare) return zd2; else return zd1; else if (mode == 13) // abs_less_or_equals_than if (nd4j::math::nd4j_abs<Z>(zd1) <= compare) return zd2; else return zd1; else printf("Undefined boolean operation: [%i]\n", mode); return zd1; } }; template <typename X, typename Y, typename Z> class CompareAndSet { public: // op definition for PairWise Transform op_def static Z op(X dX, Y dY, Z *params) { auto d1 = static_cast<Z>(dX); auto d2 = static_cast<Z>(dY); auto compare = params[0]; auto eps = params[2]; auto mode = static_cast<int>(params[3]); if (mode == 0) // equals if (nd4j::math::nd4j_abs<Z>(d2 - compare) <= eps) return d2; else return d1; else if (mode == 1) // not equals if (nd4j::math::nd4j_abs<Z>(d2 - compare) > eps) return d2; else return d1; else if (mode == 2) // less_than if (d2 < compare) return d2; else return d1; else if (mode ==3) // greater_than if (d2 > compare) return d2; else return d1; else if (mode == 4) // less_or_equals_than if (d2 <= compare) return d2; else return d1; else if (mode == 5) // greater_or_equals_than if (d2 >= compare) return d2; else return d1; else if (mode == 6) // abs_less_than if (nd4j::math::nd4j_abs<Z>(d2) < compare) return d2; else return d1; else if (mode == 7) // abs_greater_than if (nd4j::math::nd4j_abs<Z>(d2) > compare) return d2; else return d1; else if (mode == 8) // is inf if (nd4j::math::nd4j_isinf(d2)) return d2; else return d1; else if (mode == 9) // is nan if (nd4j::math::nd4j_isnan(d2)) return d2; else return d1; else if (mode == 10) if (d2 
== compare) return d2; else return d1; else if (mode == 11) if (d2 != compare) return d2; else return d1; else if (mode == 12) // abs_greater_or_equals_than if (nd4j::math::nd4j_abs<Z>(d1) >= compare) return d2; else return d1; else if (mode == 13) // abs_less_or_equals_than if (nd4j::math::nd4j_abs<Z>(d1) <= compare) return d2; else return d1; else printf("Undefined boolean operation: [%i]\n", mode); return d1; } }; template <typename X> class CompareAndSetTransform { public: no_op_exec_special_same no_op_exec_special_same_cuda // op definition for Transform op_def static X op(X d1, X *params) { auto compare = params[0]; auto set = params[1]; auto eps = params[2]; // with mode == 0 we do set if d1 equals to compare, and with mode == 1 - we go otherwise int mode = (int) params[3]; if (mode == 0) // equals if (nd4j::math::nd4j_abs<X>(d1 - compare) <= eps) return set; else return d1; //return nd4j::math::nd4j_abs<T>(d1 - compare) <= eps ? set : d1; else if (mode == 1) // not equals if (nd4j::math::nd4j_abs<X>(d1 - compare) > eps) return set; else return d1; //return nd4j::math::nd4j_abs<T>(d1 - compare) > eps ? 
set : d1; else if (mode == 2) // less_than if (d1 < compare) return set; else return d1; else if (mode ==3) // greater_than if (d1 > compare) return set; else return d1; else if (mode == 4) // less_or_equals_than if (d1 <= compare) return set; else return d1; else if (mode == 5) // greater_or_equals_than if (d1 >= compare) return set; else return d1; else if (mode == 6) // abs_less_than if (nd4j::math::nd4j_abs<X>(d1) < compare) return set; else return d1; else if (mode == 7) // abs_greater_than if (nd4j::math::nd4j_abs<X>(d1) > compare) return set; else return d1; else if (mode == 8) // is inf if (nd4j::math::nd4j_isinf(d1)) return set; else return d1; else if (mode == 9) // is nan if (nd4j::math::nd4j_isnan(d1)) return set; else return d1; else if (mode == 10) if (d1 == compare) return set; else return d1; else if (mode == 11) if (d1 != compare) return set; else return d1; else if (mode == 12) // abs_greater_or_equals_than if (nd4j::math::nd4j_abs<X>(d1) >= compare) return set; else return d1; else if (mode == 13) // abs_less_or_equals_than if (nd4j::math::nd4j_abs<X>(d1) <= compare) return set; else return d1; else printf("Undefined boolean operation: [%i]\n", mode); return d1; } }; } #endif
BlockProcessor.h
// File : BlockProcessor.h // Date : Fri 01 Apr 2016 05:52:01 PM CEST // Author : Fabian Wermelinger // Description: Process all blocks // Copyright 2016 ETH Zurich. All Rights Reserved. #ifndef BLOCKPROCESSORMPI_H_IKFSZWUJ #define BLOCKPROCESSORMPI_H_IKFSZWUJ #include <vector> #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #include "Types.h" #include <iostream> using namespace std; template <typename TLab, typename Operator, typename TGrid> inline void BlockProcessor(Operator rhs, TGrid &grid, const Real t = 0, const bool record = false) { vector<BlockInfo> avail0; #ifdef _OPENMP const int nthreads = omp_get_max_threads(); #else const int nthreads = 1; #endif /* _OPENMP */ TLab * labs = new TLab[nthreads]; // Setup the static stencil information for this kernel (operator) const int ss[3] = {rhs.stencil.sx, rhs.stencil.sy, rhs.stencil.sz}; const int se[3] = {rhs.stencil.ex, rhs.stencil.ey, rhs.stencil.ez}; for (int i = 0; i < nthreads; ++i) labs[i].prepare(grid, ss, se, rhs.stencil.tensorial); // process inner blocks avail0 = grid.getBlocksInfo(); BlockInfo * ary0 = &avail0.front(); #pragma omp parallel num_threads(nthreads) { #ifdef _OPENMP int tid = omp_get_thread_num(); #else int tid = 0; #endif /* _OPENMP */ TLab& mylab = labs[tid]; #pragma omp for schedule(dynamic,1) for (size_t i = 0; i < avail0.size(); i++) { mylab.load(ary0[i], t); rhs(mylab, ary0[i], *(FluidBlock*)ary0[i].ptrBlock); } } // clean up if(labs!=NULL) { delete[] labs; labs=NULL; } } #endif /* BLOCKPROCESSORMPI_H_IKFSZWUJ */
GB_unaryop__abs_int8_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): generated code — any change belongs in the code generator,
// not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_int8_uint32
// op(A') function:  GB_tran__abs_int8_uint32

// C type:   int8_t
// A type:   uint32_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = GB_IABS (aij)

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
// NOTE(review): the uint32 value is truncated to int8 *before* GB_IABS is
// applied — this is the generated typecast-then-op semantics, not a local bug.
#define GB_CASTING(z, x) \
    int8_t z = (int8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT8 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_int8_uint32
(
    int8_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // embarrassingly parallel: each entry is independent
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_int8_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared via textual inclusion
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
dataracetest1.c
int main() { double a[10]; #pragma omp parallel for for(int i=1;i<4;i++) { a[i]=a[i+1]; } return 0; }
seramp.c
#include<Python.h>
#include<numpy/arrayobject.h>
#include<math.h>
#include<omp.h>

// Element access into a 1-D double array through the raw data pointer and
// byte stride.  NOTE(review): direct a->data / a->strides access is the
// deprecated pre-NumPy-1.7 struct API — confirm the build defines permit it.
#define IND(a,i) *((double *)(a->data+i*a->strides[0]))

static PyObject *seramp(PyObject *self, PyObject *args, PyObject *keywds);

// y[i] = goal + pm * exp(-r0 * x[i] + r1), evaluated in parallel over x.
// rampparams is expected to hold (goal, r0, r1, pm) in that order.
static PyObject *seramp(PyObject *self, PyObject *args, PyObject *keywds)
{
    PyObject *etc;
    PyArrayObject *x,*y, *rampparams;
    double goal,r0,r1,pm;
    int i;
    npy_intp dims[1];

    // etc = PyList_New(0);

    static char *kwlist[] = {"rampparams","x","etc",NULL};

    if(!PyArg_ParseTupleAndKeywords(args,keywds,"OO|O",kwlist,&rampparams,&x,&etc))
    {
        return NULL;
    }

    // unpack the four ramp parameters from the 1-D parameter array
    goal = IND(rampparams,0);
    r0 = IND(rampparams,1);
    r1 = IND(rampparams,2);
    pm = IND(rampparams,3);

    dims[0] = x->dimensions[0];

    // NOTE(review): PyArray_SimpleNew can return NULL on allocation failure;
    // the result is used unchecked here.
    y = (PyArrayObject *) PyArray_SimpleNew(1,dims,PyArray_DOUBLE);

    #pragma omp parallel for
    for(i=0;i<dims[0];i++)
    {
        IND(y,i) = goal+pm*exp(-r0*IND(x,i) + r1);
    }

    return PyArray_Return(y);
}

// NOTE(review): the parameter list documented below (goal, m, x0) does not
// match the code's (goal, r0, r1, pm) — presumably inherited from a sibling
// model's docstring; verify before relying on it.  (Runtime string kept
// byte-identical.)
static char module_docstring[]="\
This function creates a model that fits a ramp using a rising exponential.\n\
\n\
Parameters\n\
----------\n\
goal: goal as x -> inf\n\
m: rise exp\n\
x0: time offset\n\
x: Array of time/phase points\n\
\n\
Returns\n\
-------\n\
This function returns an array of y values by combining an eclipse and a rising exponential\n\
\n\
Revisions\n\
---------\n\
2008-06-24 Kevin Stevenson, UCF \n\
kevin218@knights.ucf.edu\n\
Original version\n\
2010-12-24 Nate Lust, UCF \n\
natelust at linux dot com\n\
2018-11-22 Jonathan Fraine, SSI\n\
jfraine at spacescience.org\n\
Updated c extensions to python3, with support for python2.7\n\
";

static PyMethodDef module_methods[] = {
    {"seramp",(PyCFunction)seramp,METH_VARARGS|METH_KEYWORDS,module_docstring},{NULL}};

// Module initialization: Python 3 uses PyInit_<name> returning the module,
// Python 2 uses init<name> returning void.
PyMODINIT_FUNC
#if PY_MAJOR_VERSION >= 3
PyInit_seramp(void)
#else
initseramp(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
    PyObject *module;
    static struct PyModuleDef moduledef = {
        PyModuleDef_HEAD_INIT,
        "seramp",             /* m_name */
        module_docstring,     /* m_doc */
        -1,                   /* m_size */
        module_methods,       /* m_methods */
        NULL,                 /* m_reload */
        NULL,                 /* m_traverse */
        NULL,                 /* m_clear */
        NULL,                 /* m_free */
    };
#endif

#if PY_MAJOR_VERSION >= 3
    module = PyModule_Create(&moduledef);
    if (!module)
        return NULL;

    /* Load `numpy` functionality. */
    import_array();

    return module;
#else
    PyObject *m = Py_InitModule3("seramp", module_methods, module_docstring);
    if (m == NULL)
        return;

    /* Load `numpy` functionality. */
    import_array();
#endif
}
mandelSSEDD.c
#include <immintrin.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>

#include "mandelSSEDD.h"

/* Two double-doubles (DD) can be processed at once, i.e. one SSE register
 * holds the two hi parts, the other the two lo parts.
 * (translated from the German original)
 *
 * NOTE(review): the DD error-compensation sequences below depend on exact FP
 * evaluation order; do not reassociate (and never compile with -ffast-math —
 * see checkCompilerOptimizationDD2). */

/* DD * DD product, two lanes at once.  FMA recovers the rounding error of
 * hi*yhi exactly. */
DD2 DD2_mul(const DD2 pDD1, const DD2 pDD2) {
    const __m128d hi = pDD1.hi;
    const __m128d lo = pDD1.lo;
    const __m128d yhi = pDD2.hi;
    const __m128d ylo = pDD2.lo;

    __m128d t, tau, u, v, w;

    t = hi * yhi;                      /* Highest order double term. */
    tau = _mm_fmsub_pd(hi, yhi, t);    /* exact error of the product */
    v = hi * ylo;
    w = lo * yhi;
    tau += v + w;                      /* Add in other second-order terms. */
    u = t + tau;

    const __m128d rlo = (t - u) + tau; /* renormalize */
    return (DD2){u, rlo};
}

/* DD * plain-double product (the double is the same in both lanes of yhi). */
DD2 DD2_mul_m128d(const DD2 pDD1, const __m128d pDouble4) {
    const __m128d hi = pDD1.hi;
    const __m128d lo = pDD1.lo;
    const __m128d yhi = pDouble4;

    __m128d t, tau, u, w;

    t = hi * yhi;                      /* Highest order double term. */
    tau = _mm_fmsub_pd(hi, yhi, t);
    w = lo * yhi;
    tau += w;                          /* Add in other second-order terms. */
    u = t + tau;

    const __m128d rlo = (t - u) + tau;
    return (DD2){u, rlo};
}

/* DD + DD sum (Knuth-style two-sum with error compensation). */
DD2 DD2_add(const DD2 pDD1, const DD2 pDD2) {
    const __m128d hi = pDD1.hi;
    const __m128d lo = pDD1.lo;
    const __m128d yhi = pDD2.hi;
    const __m128d ylo = pDD2.lo;

    __m128d z, q, zz, xh;

    z = hi + yhi;
    q = hi - z;
    zz = q + yhi + (hi - (q + z)) + lo + ylo;
    xh = z + zz;

    const __m128d rlo = z - xh + zz;
    return (DD2){xh, rlo};
}

/* DD + plain-double sum. */
DD2 DD2_add__m128d(const DD2 pDD1, const __m128d y) {
    __m128d hi = pDD1.hi;
    __m128d lo = pDD1.lo;

    __m128d z, q, zz, xh;

    z = hi + y;
    q = hi - z;
    zz = q + y + (hi - (q + z)) + lo;
    xh = z + zz;

    const __m128d rlo = z - xh + zz;
    return (DD2){xh, rlo};
}

/* DD - DD, implemented as addition of the negation. */
DD2 DD2_sub(const DD2 pDD1,const DD2 pDD2) {
    return DD2_add(pDD1, (DD2){-pDD2.hi, -pDD2.lo});
}

/* Canary computation: if the compiler reassociated the DD arithmetic
 * (e.g. via -ffast-math), the exact low word below changes and we warn. */
void checkCompilerOptimizationDD2() {
    DD2 y = (DD2){_mm_set1_pd(2.9615004935834156e-03),_mm_set1_pd(-1.8408960875370855e-20)};
    DD2 erg = DD2_mul_m128d(y, _mm_set1_pd(1.0120000000000000e+03));
    if ( erg.lo[0]!=4.2085453253312943e-17) {
        printf("compiler break DD Logik -> please do not use -ffast-math or -funsafe-math-optimizations\n");
        fflush(stdout);
    }
}

/* Mandelbrot/Julia escape-time kernel in double-double precision, two pixels
 * per SSE vector, rows parallelized with OpenMP.  Writes per-pixel iteration
 * counts, last z values, and (in distance mode) the derivative dz/dc. */
void mandel_ssedd(
        int32_t *iters,
        double *lastZrs,
        double *lastZis,
        double *distancesR,
        double *distancesI,
        const int32_t mode,
        const int32_t width,
        const int32_t height,
        const double xStartHi,
        const double xStartLo,
        const double yStartHi,
        const double yStartLo,
        const double juliaCrHi,
        const double juliaCrLo,
        const double juliaCiHi,
        const double juliaCiLo,
        const double xIncHi,
        const double xIncLo,
        const double yIncHi,
        const double yIncLo,
        const int32_t maxIterations,
        const double sqrEscapeRadius)
{
    checkCompilerOptimizationDD2();

    const __m128d mZero = _mm_set1_pd(0);
    const __m128d mOne = _mm_set1_pd(1);
    const __m128d mOneminus = _mm_set1_pd(-1);
    const __m128d threshold = _mm_set1_pd(sqrEscapeRadius);

    /* all DD constants are broadcast to both lanes */
    const DD2 xmin = (DD2){_mm_set1_pd(xStartHi), _mm_set1_pd(xStartLo)};
    const DD2 ymin = (DD2){_mm_set1_pd(yStartHi), _mm_set1_pd(yStartLo)};
    const DD2 xScale = (DD2){_mm_set1_pd(xIncHi), _mm_set1_pd(xIncLo)};
    const DD2 yScale = (DD2){_mm_set1_pd(yIncHi), _mm_set1_pd(yIncLo)};
    const DD2 juliaCr = (DD2){_mm_set1_pd(juliaCrHi), _mm_set1_pd(juliaCrLo)};
    const DD2 juliaCi = (DD2){_mm_set1_pd(juliaCiHi), _mm_set1_pd(juliaCiLo)};
    const DD2 zero = (DD2){mZero, mZero};
    const DD2 one = (DD2){mOne, mZero};
    /* two pixels per step -> advance by 2*xScale */
    const DD2 xInc = DD2_mul_m128d(xScale, _mm_set1_pd(2));

    #pragma omp parallel for schedule(dynamic, 1)
    for (int y = 0; y < height; y++) {
        // as long as the assignment loop is failing, we calc some pixels less to avoid writing outside array limits
        const DD2 tY = DD2_add(ymin,DD2_mul_m128d(yScale,_mm_set1_pd(y)));
        const DD2 ci = mode == MODE_JULIA ? juliaCi : tY;
        /* lane 0 = column x, lane 1 = column x+1 */
        DD2 tX = DD2_add(xmin,DD2_mul_m128d(xScale,_mm_set_pd(1,0)));
        for (int x = 0; x < width; x += 2) {
            const DD2 cr = mode == MODE_JULIA ? juliaCr : tX;

            DD2 zr = tX;
            DD2 zi = tY;

            int32_t k = 0;
            // store the iterations
            __m128d mk = _mm_set1_pd(k);

            // last Zr/Zi values -> make them accessible as float vector
            __m128d mlastZr = mZero;
            __m128d mlastZi = mZero;

            // distance (derivative dz/dc, used for distance estimation)
            DD2 dr = one;
            DD2 di = zero;
            __m128d lastDr = dr.hi;
            __m128d lastDi = di.hi;

            // NOTE(review): _mm_set1_pd(0xFFFFFFFFFFFFFFFF) converts the
            // integer to the double 1.84e19 (bit pattern 0x43F0...), NOT an
            // all-ones bit mask; an all-ones mask would be
            // _mm_castsi128_pd(_mm_set1_epi64x(-1)).  Verify the first-
            // iteration capture of lastZ/lastD is behaving as intended.
            __m128d previousInsideMask = _mm_set1_pd(0xFFFFFFFFFFFFFFFF);

            while (++k <= maxIterations) {
                /* Compute z1 from z0 */
                const DD2 zr2 = DD2_mul(zr,zr);
                const DD2 zi2 = DD2_mul(zi,zi);
                const DD2 zr2zi2 = DD2_add(zr2,zi2);
                const __m128d insideMask = _mm_cmp_pd(zr2zi2.hi, threshold, _CMP_LT_OS);

                // store last inside values of z
                // copy only if inside mask changes for the vector (xor previous and current
                const __m128d noticeZMask = _mm_xor_pd(insideMask, previousInsideMask);
                mlastZr = _mm_and_pd(noticeZMask, zr.hi) + mlastZr;
                mlastZi = _mm_and_pd(noticeZMask, zi.hi) + mlastZi;
                if( mode == MODE_MANDEL_DISTANCE ) {
                    lastDr = _mm_and_pd(noticeZMask, dr.hi) + lastDr;
                    lastDi = _mm_and_pd(noticeZMask, di.hi) + lastDi;
                }
                previousInsideMask = insideMask;

                /* Early bailout? */
                if (_mm_testz_pd(insideMask, mOneminus)) {
                    break;
                }

                /* Increment k for all vectors inside */
                mk = _mm_and_pd(insideMask, mOne) + mk;

                if ( mode == MODE_MANDEL_DISTANCE) {
                    /* d' = 2*z*d + 1 (complex product, doubled) */
                    const DD2 zwergDr = DD2_sub(DD2_mul(zr,dr), DD2_mul(zi,di));
                    const DD2 zwergDi = DD2_add(DD2_mul(zr,di), DD2_mul(zi,dr));
                    dr = DD2_add(DD2_add(zwergDr,zwergDr), one);
                    di = DD2_add(zwergDi,zwergDi);
                }

                /* z' = z^2 + c */
                const DD2 zrzi = DD2_mul(zr,zi);
                zi = DD2_add(DD2_add(zrzi,zrzi),ci);
                zr = DD2_add(DD2_sub(zr2,zi2),cr);
            }

            // convert counter to int and make it accessible via array index
            union {
                int32_t i[2];
                __m128i m;
            } vCount;
            vCount.m = _mm_cvtpd_epi32(mk);

            double tLastZrs[2];
            double tLastZis[2];

            _mm_storeu_pd(tLastZrs, mlastZr);
            _mm_storeu_pd(tLastZis, mlastZi);

            const int tIndex = x + y * width;
            /* guard i: the last vector of an odd-width row only has one valid lane */
            for ( int i=0; i<2 && x+i<width; i++ ) {
                iters[tIndex+i] = vCount.i[i];
                lastZrs[tIndex+i] = tLastZrs[i];
                lastZis[tIndex+i] = tLastZis[i];
            }

            if ( mode == MODE_MANDEL_DISTANCE) {
                double tLastDrs[2];
                double tLastDis[2];

                _mm_storeu_pd(tLastDrs, lastDr);
                _mm_storeu_pd(tLastDis, lastDi);

                for ( int i=0; i<2 && x+i<width; i++ ) {
                    distancesR[tIndex+i] = tLastDrs[i];
                    distancesI[tIndex+i] = tLastDis[i];
                }
            }

            tX = DD2_add(tX, xInc);
        }
    }
}
reduction-clause.c
#include <stdio.h> #include <stdlib.h> #ifdef _OPENMP #include <omp.h> #else #define omp_get_thread_num() 0 #endif main(int argc, char **argv) { int i, n=20, a[n],suma=10; if(argc < 2) { fprintf(stderr,"Falta iteraciones\n"); exit(-1); } n = atoi(argv[1]); if (n>20) { n=20; printf("n=%d",n); } for (i=0; i<n; i++) a[i] = i; #pragma omp parallel for reduction(+:suma) for (i=0; i<n; i++) suma += a[i]; printf("Tras 'parallel' suma=%d\n",suma); }
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H

namespace Eigen {

namespace internal {

template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;

/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsStride,
    ResScalar* res, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar,LhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major
    // (C = A*B  <=>  C^T = B^T * A^T, with lhs/rhs roles and storage orders swapped)
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
  }
};

/*  Specialization for a col-major destination matrix
 *    => Blocking algorithm following Goto's paper */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  static void run(Index rows, Index cols, Index depth,
    const LhsScalar* _lhs, Index lhsStride,
    const RhsScalar* _rhs, Index rhsStride,
    ResScalar* res, Index resStride,
    ResScalar alpha,
    level3_blocking<LhsScalar,RhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
    const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);

    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    Index kc = blocking.kc();                   // cache block size along the K direction
    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
    //Index nc = blocking.nc(); // cache block size along the N direction

    gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
    gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
    gebp_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;

#ifdef EIGEN_HAS_OPENMP
    if(info)
    {
      // this is the parallel version!
      // NOTE(review): synchronization below is done with busy-wait spin loops
      // on GemmParallelInfo fields (users/sync); correctness relies on the
      // semantics of those fields as declared in Eigen's Parallelizer —
      // statement order here is load-bearing, do not reorder.
      Index tid = omp_get_thread_num();
      Index threads = omp_get_num_threads();

      std::size_t sizeA = kc*mc;
      std::size_t sizeW = kc*Traits::WorkSpaceFactor;
      ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, 0);
      ei_declare_aligned_stack_constructed_variable(RhsScalar, w, sizeW, 0);

      RhsScalar* blockB = blocking.blockB();
      eigen_internal_assert(blockB!=0);

      // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
      for(Index k=0; k<depth; k+=kc)
      {
        const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'

        // In order to reduce the chance that a thread has to wait for the other,
        // let's start by packing A'.
        pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc);

        // Pack B_k to B' in a parallel fashion:
        // each thread packs the sub block B_k,j to B'_j where j is the thread id.

        // However, before copying to B'_j, we have to make sure that no other thread is still using it,
        // i.e., we test that info[tid].users equals 0.
        // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
        while(info[tid].users!=0) {}
        info[tid].users += threads;

        pack_rhs(blockB+info[tid].rhs_start*actual_kc, &rhs(k,info[tid].rhs_start), rhsStride, actual_kc, info[tid].rhs_length);

        // Notify the other threads that the part B'_j is ready to go.
        info[tid].sync = k;

        // Computes C_i += A' * B' per B'_j
        for(Index shift=0; shift<threads; ++shift)
        {
          Index j = (tid+shift)%threads;

          // At this point we have to make sure that B'_j has been updated by the thread j,
          // we use testAndSetOrdered to mimic a volatile access.
          // However, no need to wait for the B' part which has been updated by the current thread!
          if(shift>0)
            while(info[j].sync!=k) {}

          gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0, w);
        }

        // Then keep going as usual with the remaining A'
        for(Index i=mc; i<rows; i+=mc)
        {
          const Index actual_mc = (std::min)(i+mc,rows)-i;

          // pack A_i,k to A'
          pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);

          // C_i += A' * B'
          gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0, w);
        }

        // Release all the sub blocks B'_j of B' for the current thread,
        // i.e., we simply decrement the number of users by 1
        for(Index j=0; j<threads; ++j)
          #pragma omp atomic
          --(info[j].users);
      }
    }
    else
#endif // EIGEN_HAS_OPENMP
    {
      EIGEN_UNUSED_VARIABLE(info);

      // this is the sequential version!
      std::size_t sizeA = kc*mc;
      std::size_t sizeB = kc*cols;
      std::size_t sizeW = kc*Traits::WorkSpaceFactor;

      ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
      ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
      ei_declare_aligned_stack_constructed_variable(RhsScalar, blockW, sizeW, blocking.blockW());

      // For each horizontal panel of the rhs, and corresponding panel of the lhs...
      // (==GEMM_VAR1)
      for(Index k2=0; k2<depth; k2+=kc)
      {
        const Index actual_kc = (std::min)(k2+kc,depth)-k2;

        // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
        // => Pack rhs's panel into a sequential chunk of memory (L2 caching)
        // Note that this panel will be read as many times as the number of blocks in the lhs's
        // vertical panel which is, in practice, a very low number.
        pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols);

        // For each mc x kc block of the lhs's vertical panel...
        // (==GEPP_VAR1)
        for(Index i2=0; i2<rows; i2+=mc)
        {
          const Index actual_mc = (std::min)(i2+mc,rows)-i2;

          // We pack the lhs's block into a sequential chunk of memory (L1 caching)
          // Note that this block will be read a very high number of times, which is equal to the number of
          // micro vertical panel of the large rhs's panel (e.g., cols/4 times).
          pack_lhs(blockA, &lhs(i2,k2), lhsStride, actual_kc, actual_mc);

          // Everything is packed, we can now call the block * panel kernel:
          gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);
        }
      }
    }
  }
};

/*********************************************************************************
*  Specialization of GeneralProduct<> for "large" GEMM, i.e.,
*  implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/

template<typename Lhs, typename Rhs>
struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
 : traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> >
{};

// Functor object binding all GEMM arguments; invoked (possibly per-thread,
// with a GemmParallelInfo) by Eigen's parallelizer.
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, Scalar actualAlpha, BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  void initParallelSession() const
  {
    m_blocking.allocateB();
  }

  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              /*(const Scalar*)*/&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              /*(const Scalar*)*/&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

  protected:
    const Lhs& m_lhs;
    const Rhs& m_rhs;
    Dest& m_dest;
    Scalar m_actualAlpha;
    BlockingType& m_blocking;
};

template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;

// Holds the packing buffers (A', B', workspace) and the three cache block
// sizes (mc, nc, kc) shared by the GEMM kernels above.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;
    RhsScalar* m_blockW;

    DenseIndex m_mc;
    DenseIndex m_nc;
    DenseIndex m_kc;

  public:

    level3_blocking()
      : m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline DenseIndex mc() const { return m_mc; }
    inline DenseIndex nc() const { return m_nc; }
    inline DenseIndex kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
    inline RhsScalar* blockW() { return m_blockW; }
};

// Fully-static specialization (all dimensions known at compile time).
// NOTE: definition continues beyond this chunk.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ?
MaxRows : MaxCols }; typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar; typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar; typedef gebp_traits<LhsScalar,RhsScalar> Traits; enum { SizeA = ActualRows * MaxDepth, SizeB = ActualCols * MaxDepth, SizeW = MaxDepth * Traits::WorkSpaceFactor }; EIGEN_ALIGN16 LhsScalar m_staticA[SizeA]; EIGEN_ALIGN16 RhsScalar m_staticB[SizeB]; EIGEN_ALIGN16 RhsScalar m_staticW[SizeW]; public: gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/) { this->m_mc = ActualRows; this->m_nc = ActualCols; this->m_kc = MaxDepth; this->m_blockA = m_staticA; this->m_blockB = m_staticB; this->m_blockW = m_staticW; } inline void allocateA() {} inline void allocateB() {} inline void allocateW() {} inline void allocateAll() {} }; template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor> class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false> : public level3_blocking< typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type, typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type> { enum { Transpose = StorageOrder==RowMajor }; typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar; typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar; typedef gebp_traits<LhsScalar,RhsScalar> Traits; DenseIndex m_sizeA; DenseIndex m_sizeB; DenseIndex m_sizeW; public: gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth) { this->m_mc = Transpose ? cols : rows; this->m_nc = Transpose ? 
rows : cols; this->m_kc = depth; computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc); m_sizeA = this->m_mc * this->m_kc; m_sizeB = this->m_kc * this->m_nc; m_sizeW = this->m_kc*Traits::WorkSpaceFactor; } void allocateA() { if(this->m_blockA==0) this->m_blockA = aligned_new<LhsScalar>(m_sizeA); } void allocateB() { if(this->m_blockB==0) this->m_blockB = aligned_new<RhsScalar>(m_sizeB); } void allocateW() { if(this->m_blockW==0) this->m_blockW = aligned_new<RhsScalar>(m_sizeW); } void allocateAll() { allocateA(); allocateB(); allocateW(); } ~gemm_blocking_space() { aligned_delete(this->m_blockA, m_sizeA); aligned_delete(this->m_blockB, m_sizeB); aligned_delete(this->m_blockW, m_sizeW); } }; } // end namespace internal template<typename Lhs, typename Rhs> class GeneralProduct<Lhs, Rhs, GemmProduct> : public ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> { enum { MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime) }; public: EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct) typedef typename Lhs::Scalar LhsScalar; typedef typename Rhs::Scalar RhsScalar; typedef Scalar ResScalar; GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) { typedef internal::scalar_product_op<LhsScalar,RhsScalar> BinOp; EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar); } template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const { eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs); typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs); Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs) * RhsBlasTraits::extractScalarFactor(m_rhs); typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? 
RowMajor : ColMajor,LhsScalar,RhsScalar, Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType; typedef internal::gemm_functor< Scalar, Index, internal::general_matrix_matrix_product< Index, LhsScalar, (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate), RhsScalar, (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate), (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>, _ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor; BlockingType blocking(dst.rows(), dst.cols(), lhs.cols()); internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit); } }; } // end namespace Eigen #endif // EIGEN_GENERAL_MATRIX_MATRIX_H
// ===== file: plot.h =====
#ifndef OPENMC_PLOT_H #define OPENMC_PLOT_H #include <unordered_map> #include <sstream> #include "pugixml.hpp" #include "xtensor/xarray.hpp" #include "hdf5.h" #include "openmc/position.h" #include "openmc/constants.h" #include "openmc/cell.h" #include "openmc/geometry.h" #include "openmc/particle.h" #include "openmc/xml_interface.h" namespace openmc { //=============================================================================== // Global variables //=============================================================================== class Plot; namespace model { extern std::vector<Plot> plots; //!< Plot instance container extern std::unordered_map<int, int> plot_map; //!< map of plot ids to index } // namespace model //=============================================================================== // RGBColor holds color information for plotted objects //=============================================================================== struct RGBColor { //Constructors RGBColor() : red(0), green(0), blue(0) { }; RGBColor(const int v[3]) : red(v[0]), green(v[1]), blue(v[2]) { }; RGBColor(int r, int g, int b) : red(r), green(g), blue(b) { }; RGBColor(const std::vector<int> &v) { if (v.size() != 3) { throw std::out_of_range("Incorrect vector size for RGBColor."); } red = v[0]; green = v[1]; blue = v[2]; } bool operator ==(const RGBColor& other) { return red == other.red && green == other.green && blue == other.blue; } // Members uint8_t red, green, blue; }; // some default colors const RGBColor WHITE {255, 255, 255}; const RGBColor RED {255, 0, 0}; typedef xt::xtensor<RGBColor, 2> ImageData; struct IdData { // Constructor IdData(size_t h_res, size_t v_res); // Methods void set_value(size_t y, size_t x, const Particle& p, int level); void set_overlap(size_t y, size_t x); // Members xt::xtensor<int32_t, 3> data_; //!< 2D array of cell & material ids }; struct PropertyData { // Constructor PropertyData(size_t h_res, size_t v_res); // Methods void set_value(size_t y, size_t x, 
const Particle& p, int level); void set_overlap(size_t y, size_t x); // Members xt::xtensor<double, 3> data_; //!< 2D array of temperature & density data }; enum class PlotType { slice = 1, voxel = 2 }; enum class PlotBasis { xy = 1, xz = 2, yz = 3 }; enum class PlotColorBy { cells = 0, mats = 1 }; //=============================================================================== // Plot class //=============================================================================== class PlotBase { public: template<class T> T get_map() const; // Members public: Position origin_; //!< Plot origin in geometry Position width_; //!< Plot width in geometry PlotBasis basis_; //!< Plot basis (XY/XZ/YZ) std::array<size_t, 3> pixels_; //!< Plot size in pixels bool color_overlaps_; //!< Show overlapping cells? int level_; //!< Plot universe level }; template<class T> T PlotBase::get_map() const { size_t width = pixels_[0]; size_t height = pixels_[1]; // get pixel size double in_pixel = (width_[0])/static_cast<double>(width); double out_pixel = (width_[1])/static_cast<double>(height); // size data array T data(width, height); // setup basis indices and initial position centered on pixel int in_i, out_i; Position xyz = origin_; switch(basis_) { case PlotBasis::xy : in_i = 0; out_i = 1; break; case PlotBasis::xz : in_i = 0; out_i = 2; break; case PlotBasis::yz : in_i = 1; out_i = 2; break; #ifdef __GNUC__ default: __builtin_unreachable(); #endif } // set initial position xyz[in_i] = origin_[in_i] - width_[0] / 2. + in_pixel / 2.; xyz[out_i] = origin_[out_i] + width_[1] / 2. 
- out_pixel / 2.; // arbitrary direction Direction dir = {0.7071, 0.7071, 0.0}; #pragma omp parallel { Particle p; p.r() = xyz; p.u() = dir; p.coord_[0].universe = model::root_universe; int level = level_; int j{}; #pragma omp for for (int y = 0; y < height; y++) { p.r()[out_i] = xyz[out_i] - out_pixel * y; for (int x = 0; x < width; x++) { p.r()[in_i] = xyz[in_i] + in_pixel * x; p.n_coord_ = 1; // local variables bool found_cell = find_cell(&p, 0); j = p.n_coord_ - 1; if (level >=0) {j = level + 1;} if (found_cell) { data.set_value(y, x, p, j); } if (color_overlaps_ && check_cell_overlap(&p, false)) { data.set_overlap(y, x); } } // inner for } // outer for } // omp parallel return data; } class Plot : public PlotBase { public: // Constructor Plot(pugi::xml_node plot); // Methods private: void set_id(pugi::xml_node plot_node); void set_type(pugi::xml_node plot_node); void set_output_path(pugi::xml_node plot_node); void set_bg_color(pugi::xml_node plot_node); void set_basis(pugi::xml_node plot_node); void set_origin(pugi::xml_node plot_node); void set_width(pugi::xml_node plot_node); void set_universe(pugi::xml_node plot_node); void set_default_colors(pugi::xml_node plot_node); void set_user_colors(pugi::xml_node plot_node); void set_meshlines(pugi::xml_node plot_node); void set_mask(pugi::xml_node plot_node); void set_overlap_color(pugi::xml_node plot_node); // Members public: int id_; //!< Plot ID PlotType type_; //!< Plot type (Slice/Voxel) PlotColorBy color_by_; //!< Plot coloring (cell/material) int meshlines_width_; //!< Width of lines added to the plot int index_meshlines_mesh_ {-1}; //!< Index of the mesh to draw on the plot RGBColor meshlines_color_; //!< Color of meshlines on the plot RGBColor not_found_ {WHITE}; //!< Plot background color RGBColor overlap_color_ {RED}; //!< Plot overlap color std::vector<RGBColor> colors_; //!< Plot colors std::string path_plot_; //!< Plot output filename }; 
//===============================================================================
// Non-member functions
//===============================================================================

//! Add mesh lines to image data of a plot object
//! \param[in] plot object
//! \param[out] image data associated with the plot object
// NOTE(review): pl is passed by value; presumably cheap enough here, but
// confirm Plot copying is intended rather than const reference.
void draw_mesh_lines(Plot pl, ImageData& data);

//! Write a ppm image to file using a plot object's image data
//! \param[in] plot object
//! \param[out] image data associated with the plot object
void output_ppm(Plot pl, const ImageData& data);

//! Initialize a voxel file
//! \param[in] id of an open hdf5 file
//! \param[in] dimensions of the voxel file (dx, dy, dz)
//! \param[out] dataspace pointer to voxel data
//! \param[out] dataset pointer to voxel data
//! \param[out] pointer to memory space of voxel data
void voxel_init(hid_t file_id, const hsize_t* dims, hid_t* dspace, hid_t* dset,
                hid_t* memspace);

//! Write a section of the voxel data to hdf5
//! \param[in] voxel slice
//! \param[out] dataspace pointer to voxel data
//! \param[out] dataset pointer to voxel data
//! \param[out] pointer to data to write
void voxel_write_slice(int x, hid_t dspace, hid_t dset, hid_t memspace,
                       void* buf);

//! Close voxel file entities
//! \param[in] data space to close
//! \param[in] dataset to close
//! \param[in] memory space to close
void voxel_finalize(hid_t dspace, hid_t dset, hid_t memspace);

//===============================================================================
// External functions
//===============================================================================

//! Read plot specifications from a plots.xml file
void read_plots_xml();

//! Create a ppm image for a plot object
//! \param[in] plot object
void create_ppm(Plot pl);

//! Create an hdf5 voxel file for a plot object
//! \param[in] plot object
void create_voxel(Plot pl);

//! Create a randomly generated RGB color
//! \return RGBColor with random value
RGBColor random_color();

} // namespace openmc
#endif // OPENMC_PLOT_H
// ===== file: builder.h =====
// Copyright (c) 2015, The Regents of the University of California (Regents) // See LICENSE.txt for license details #ifndef BUILDER_H_ #define BUILDER_H_ #include <algorithm> #include <cinttypes> #include <fstream> #include <functional> #include <type_traits> #include <utility> #include "command_line.h" #include "generator.h" #include "graph.h" #include "platform_atomics.h" #include "pvector.h" #include "reader.h" #include "timer.h" #include "util.h" /* GAP Benchmark Suite Class: BuilderBase Author: Scott Beamer Given arguements from the command line (cli), returns a built graph - MakeGraph() will parse cli and obtain edgelist and call MakeGraphFromEL(edgelist) to perform actual graph construction - edgelist can be from file (reader) or synthetically generated (generator) - Common case: BuilderBase typedef'd (w/ params) to be Builder (benchmark.h) */ template <typename NodeID_, typename DestID_ = NodeID_, typename WeightT_ = NodeID_, bool invert = true> class BuilderBase { typedef EdgePair<NodeID_, DestID_> Edge; typedef pvector<Edge> EdgeList; const CLBase &cli_; bool symmetrize_; bool needs_weights_; int64_t num_nodes_ = -1; public: explicit BuilderBase(const CLBase &cli) : cli_(cli) { symmetrize_ = cli_.symmetrize(); needs_weights_ = !std::is_same<NodeID_, DestID_>::value; } DestID_ GetSource(EdgePair<NodeID_, NodeID_> e) { return e.u; } DestID_ GetSource(EdgePair<NodeID_, NodeWeight<NodeID_, WeightT_>> e) { return NodeWeight<NodeID_, WeightT_>(e.u, e.v.w); } NodeID_ FindMaxNodeID(const EdgeList &el) { NodeID_ max_seen = 0; #pragma omp parallel for reduction(max : max_seen) for (auto it = el.begin(); it < el.end(); it++) { Edge e = *it; max_seen = std::max(max_seen, e.u); max_seen = std::max(max_seen, (NodeID_) e.v); } return max_seen; } pvector<NodeID_> CountDegrees(const EdgeList &el, bool transpose) { pvector<NodeID_> degrees(num_nodes_, 0); #pragma omp parallel for for (auto it = el.begin(); it < el.end(); it++) { Edge e = *it; if (symmetrize_ || 
(!symmetrize_ && !transpose)) fetch_and_add(degrees[e.u], 1); if (symmetrize_ || (!symmetrize_ && transpose)) fetch_and_add(degrees[(NodeID_) e.v], 1); } return degrees; } static pvector<SGOffset> PrefixSum(const pvector<NodeID_> &degrees) { pvector<SGOffset> sums(degrees.size() + 1); SGOffset total = 0; for (size_t n=0; n < degrees.size(); n++) { sums[n] = total; total += degrees[n]; } sums[degrees.size()] = total; return sums; } static pvector<SGOffset> ParallelPrefixSum(const pvector<NodeID_> &degrees) { const size_t block_size = 1<<20; const size_t num_blocks = (degrees.size() + block_size - 1) / block_size; pvector<SGOffset> local_sums(num_blocks); #pragma omp parallel for for (size_t block=0; block < num_blocks; block++) { SGOffset lsum = 0; size_t block_end = std::min((block + 1) * block_size, degrees.size()); for (size_t i=block * block_size; i < block_end; i++) lsum += degrees[i]; local_sums[block] = lsum; } pvector<SGOffset> bulk_prefix(num_blocks+1); SGOffset total = 0; for (size_t block=0; block < num_blocks; block++) { bulk_prefix[block] = total; total += local_sums[block]; } bulk_prefix[num_blocks] = total; pvector<SGOffset> prefix(degrees.size() + 1); #pragma omp parallel for for (size_t block=0; block < num_blocks; block++) { SGOffset local_total = bulk_prefix[block]; size_t block_end = std::min((block + 1) * block_size, degrees.size()); for (size_t i=block * block_size; i < block_end; i++) { prefix[i] = local_total; local_total += degrees[i]; } } prefix[degrees.size()] = bulk_prefix[num_blocks]; return prefix; } // Removes self-loops and redundant edges // Side effect: neighbor IDs will be sorted void SquishCSR(const CSRGraph<NodeID_, DestID_, invert> &g, bool transpose, DestID_*** sq_index, DestID_** sq_neighs) { pvector<NodeID_> diffs(g.num_nodes()); DestID_ *n_start, *n_end; #pragma omp parallel for private(n_start, n_end) for (NodeID_ n=0; n < g.num_nodes(); n++) { if (transpose) { n_start = g.in_neigh(n).begin(); n_end = g.in_neigh(n).end(); } 
else { n_start = g.out_neigh(n).begin(); n_end = g.out_neigh(n).end(); } std::sort(n_start, n_end); DestID_ *new_end = std::unique(n_start, n_end); new_end = std::remove(n_start, new_end, n); diffs[n] = new_end - n_start; } pvector<SGOffset> sq_offsets = ParallelPrefixSum(diffs); *sq_neighs = new DestID_[sq_offsets[g.num_nodes()]]; *sq_index = CSRGraph<NodeID_, DestID_>::GenIndex(sq_offsets, *sq_neighs); #pragma omp parallel for private(n_start) for (NodeID_ n=0; n < g.num_nodes(); n++) { if (transpose) n_start = g.in_neigh(n).begin(); else n_start = g.out_neigh(n).begin(); std::copy(n_start, n_start+diffs[n], (*sq_index)[n]); } } CSRGraph<NodeID_, DestID_, invert> SquishGraph( const CSRGraph<NodeID_, DestID_, invert> &g) { DestID_ **out_index, *out_neighs, **in_index, *in_neighs; SquishCSR(g, false, &out_index, &out_neighs); if (g.directed()) { if (invert) SquishCSR(g, true, &in_index, &in_neighs); return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index, out_neighs, in_index, in_neighs); } else { return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index, out_neighs); } } /* Graph Bulding Steps (for CSR): - Read edgelist once to determine vertex degrees (CountDegrees) - Determine vertex offsets by a prefix sum (ParallelPrefixSum) - Allocate storage and set points according to offsets (GenIndex) - Copy edges into storage */ void MakeCSR(const EdgeList &el, bool transpose, DestID_*** index, DestID_** neighs) { pvector<NodeID_> degrees = CountDegrees(el, transpose); pvector<SGOffset> offsets = ParallelPrefixSum(degrees); *neighs = new DestID_[offsets[num_nodes_]]; *index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, *neighs); #pragma omp parallel for for (auto it = el.begin(); it < el.end(); it++) { Edge e = *it; if (symmetrize_ || (!symmetrize_ && !transpose)) (*neighs)[fetch_and_add(offsets[e.u], 1)] = e.v; if (symmetrize_ || (!symmetrize_ && transpose)) (*neighs)[fetch_and_add(offsets[static_cast<NodeID_>(e.v)], 1)] = GetSource(e); } } 
CSRGraph<NodeID_, DestID_, invert> MakeGraphFromEL(EdgeList &el) { DestID_ **index = nullptr, **inv_index = nullptr; DestID_ *neighs = nullptr, *inv_neighs = nullptr; Timer t; t.Start(); if (num_nodes_ == -1) num_nodes_ = FindMaxNodeID(el)+1; if (needs_weights_) Generator<NodeID_, DestID_, WeightT_>::InsertWeights(el); MakeCSR(el, false, &index, &neighs); if (!symmetrize_ && invert) MakeCSR(el, true, &inv_index, &inv_neighs); t.Stop(); //PrintTime("Build Time", t.Seconds()); if (symmetrize_) return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs); else return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs, inv_index, inv_neighs); } CSRGraph<NodeID_, DestID_, invert> MakeGraph() { CSRGraph<NodeID_, DestID_, invert> g; { // extra scope to trigger earlier deletion of el (save memory) EdgeList el; if (cli_.filename() != "") { Reader<NodeID_, DestID_, WeightT_, invert> r(cli_.filename()); if ((r.GetSuffix() == ".sg") || (r.GetSuffix() == ".wsg")) { return r.ReadSerializedGraph(); } else { el = r.ReadFile(needs_weights_); } } else if (cli_.scale() != -1) { Generator<NodeID_, DestID_> gen(cli_.scale(), cli_.degree()); el = gen.GenerateEL(cli_.uniform()); } g = MakeGraphFromEL(el); } return SquishGraph(g); } // Relabels (and rebuilds) graph by order of decreasing degree static CSRGraph<NodeID_, DestID_, invert> RelabelByDegree( const CSRGraph<NodeID_, DestID_, invert> &g) { if (g.directed()) { std::cout << "Cannot relabel directed graph" << std::endl; std::exit(-11); } Timer t; t.Start(); typedef std::pair<int64_t, NodeID_> degree_node_p; pvector<degree_node_p> degree_id_pairs(g.num_nodes()); #pragma omp parallel for for (NodeID_ n=0; n < g.num_nodes(); n++) degree_id_pairs[n] = std::make_pair(g.out_degree(n), n); std::sort(degree_id_pairs.begin(), degree_id_pairs.end(), std::greater<degree_node_p>()); pvector<NodeID_> degrees(g.num_nodes()); pvector<NodeID_> new_ids(g.num_nodes()); #pragma omp parallel for for (NodeID_ n=0; n < g.num_nodes(); 
n++) { degrees[n] = degree_id_pairs[n].first; new_ids[degree_id_pairs[n].second] = n; } pvector<SGOffset> offsets = ParallelPrefixSum(degrees); DestID_* neighs = new DestID_[offsets[g.num_nodes()]]; DestID_** index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, neighs); #pragma omp parallel for for (NodeID_ u=0; u < g.num_nodes(); u++) { for (NodeID_ v : g.out_neigh(u)) neighs[offsets[new_ids[u]]++] = new_ids[v]; std::sort(index[new_ids[u]], index[new_ids[u]+1]); } t.Stop(); PrintTime("Relabel", t.Seconds()); return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), index, neighs); } }; #endif // BUILDER_H_
// ===== file: Stmt.h =====
//===--- Stmt.h - Classes for representing statements -----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <string> namespace llvm { class FoldingSetNodeID; } namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class IdentifierInfo; class LabelDecl; class ParmVarDecl; class PrinterHelper; struct PrintingPolicy; class QualType; class RecordDecl; class SourceManager; class StringLiteral; class SwitchStmt; class Token; class VarDecl; //===--------------------------------------------------------------------===// // ExprIterator - Iterators for iterating over Stmt* arrays that contain // only Expr*. This is needed because AST nodes use Stmt* arrays to store // references to children (to be compatible with StmtIterator). 
//===--------------------------------------------------------------------===// class Stmt; class Expr; class ExprIterator : public std::iterator<std::forward_iterator_tag, Expr *&, ptrdiff_t, Expr *&, Expr *&> { Stmt** I; public: ExprIterator(Stmt** i) : I(i) {} ExprIterator() : I(nullptr) {} ExprIterator& operator++() { ++I; return *this; } ExprIterator operator-(size_t i) { return I-i; } ExprIterator operator+(size_t i) { return I+i; } Expr* operator[](size_t idx); // FIXME: Verify that this will correctly return a signed distance. signed operator-(const ExprIterator& R) const { return I - R.I; } Expr* operator*() const; Expr* operator->() const; bool operator==(const ExprIterator& R) const { return I == R.I; } bool operator!=(const ExprIterator& R) const { return I != R.I; } bool operator>(const ExprIterator& R) const { return I > R.I; } bool operator>=(const ExprIterator& R) const { return I >= R.I; } }; class ConstExprIterator : public std::iterator<std::forward_iterator_tag, const Expr *&, ptrdiff_t, const Expr *&, const Expr *&> { const Stmt * const *I; public: ConstExprIterator(const Stmt * const *i) : I(i) {} ConstExprIterator() : I(nullptr) {} ConstExprIterator& operator++() { ++I; return *this; } ConstExprIterator operator+(size_t i) const { return I+i; } ConstExprIterator operator-(size_t i) const { return I-i; } const Expr * operator[](size_t idx) const; signed operator-(const ConstExprIterator& R) const { return I - R.I; } const Expr * operator*() const; const Expr * operator->() const; bool operator==(const ConstExprIterator& R) const { return I == R.I; } bool operator!=(const ConstExprIterator& R) const { return I != R.I; } bool operator>(const ConstExprIterator& R) const { return I > R.I; } bool operator>=(const ConstExprIterator& R) const { return I >= R.I; } }; //===----------------------------------------------------------------------===// // AST classes for statements. 
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class LLVM_ALIGNAS(LLVM_PTR_SIZE) Stmt {
public:
  /// One enumerator per concrete statement class, generated from
  /// StmtNodes.inc, plus first/last range markers for each abstract base.
  enum StmtClass {
    NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
        first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
        first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
  };

  // Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
  void* operator new(size_t bytes) throw() {
    llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
  }
  void operator delete(void* data) throw() {
    llvm_unreachable("Stmts cannot be released with regular 'delete'.");
  }

  class StmtBitfields {
    friend class Stmt;

    /// \brief The statement class.
    unsigned sClass : 8;
  };
  enum { NumStmtBits = 8 };

  class CompoundStmtBitfields {
    friend class CompoundStmt;

    // Reserve the bits used by StmtBitfields so the union members below
    // overlay consistently.
    unsigned : NumStmtBits;

    unsigned NumStmts : 32 - NumStmtBits;
  };

  class ExprBitfields {
    friend class Expr;
    friend class DeclRefExpr; // computeDependence
    friend class InitListExpr; // ctor
    friend class DesignatedInitExpr; // ctor
    friend class BlockDeclRefExpr; // ctor
    friend class ASTStmtReader; // deserialization
    friend class CXXNewExpr; // ctor
    friend class DependentScopeDeclRefExpr; // ctor
    friend class CXXConstructExpr; // ctor
    friend class CallExpr; // ctor
    friend class OffsetOfExpr; // ctor
    friend class ObjCMessageExpr; // ctor
    friend class ObjCArrayLiteral; // ctor
    friend class ObjCDictionaryLiteral; // ctor
    friend class ShuffleVectorExpr; // ctor
    friend class ParenListExpr; // ctor
    friend class CXXUnresolvedConstructExpr; // ctor
    friend class CXXDependentScopeMemberExpr; // ctor
    friend class OverloadExpr; // ctor
    friend class PseudoObjectExpr; // ctor
    friend class AtomicExpr; // ctor

    unsigned : NumStmtBits;

    unsigned ValueKind : 2;
    unsigned ObjectKind : 2;
    unsigned TypeDependent : 1;
    unsigned ValueDependent : 1;
    unsigned InstantiationDependent : 1;
    unsigned ContainsUnexpandedParameterPack : 1;
  };
  // 8 Stmt bits + the 8 Expr bits declared above.
  enum { NumExprBits = 16 };

  class CharacterLiteralBitfields {
    friend class CharacterLiteral;

    unsigned : NumExprBits;

    unsigned Kind : 2;
  };

  enum APFloatSemantics {
    IEEEhalf,
    IEEEsingle,
    IEEEdouble,
    x87DoubleExtended,
    IEEEquad,
    PPCDoubleDouble
  };

  class FloatingLiteralBitfields {
    friend class FloatingLiteral;

    unsigned : NumExprBits;

    unsigned Semantics : 3; // Provides semantics for APFloat construction
    unsigned IsExact : 1;
  };

  class UnaryExprOrTypeTraitExprBitfields {
    friend class UnaryExprOrTypeTraitExpr;

    unsigned : NumExprBits;

    unsigned Kind : 2;
    unsigned IsType : 1; // true if operand is a type, false if an expression.
  };

  class DeclRefExprBitfields {
    friend class DeclRefExpr;
    friend class ASTStmtReader; // deserialization

    unsigned : NumExprBits;

    unsigned HasQualifier : 1;
    unsigned HasTemplateKWAndArgsInfo : 1;
    unsigned HasFoundDecl : 1;
    unsigned HadMultipleCandidates : 1;
    unsigned RefersToEnclosingVariableOrCapture : 1;
  };

  class CastExprBitfields {
    friend class CastExpr;

    unsigned : NumExprBits;

    unsigned Kind : 6;
    unsigned BasePathSize : 32 - 6 - NumExprBits;
  };

  class CallExprBitfields {
    friend class CallExpr;

    unsigned : NumExprBits;

    unsigned NumPreArgs : 1;
  };

  class ExprWithCleanupsBitfields {
    friend class ExprWithCleanups;
    friend class ASTStmtReader; // deserialization

    unsigned : NumExprBits;

    unsigned NumObjects : 32 - NumExprBits;
  };

  class PseudoObjectExprBitfields {
    friend class PseudoObjectExpr;
    friend class ASTStmtReader; // deserialization

    unsigned : NumExprBits;

    // These don't need to be particularly wide, because they're
    // strictly limited by the forms of expressions we permit.
    unsigned NumSubExprs : 8;
    unsigned ResultIndex : 32 - 8 - NumExprBits;
  };

  class ObjCIndirectCopyRestoreExprBitfields {
    friend class ObjCIndirectCopyRestoreExpr;

    unsigned : NumExprBits;

    unsigned ShouldCopy : 1;
  };

  class InitListExprBitfields {
    friend class InitListExpr;

    unsigned : NumExprBits;

    /// Whether this initializer list originally had a GNU array-range
    /// designator in it. This is a temporary marker used by CodeGen.
    unsigned HadArrayRangeDesignator : 1;
  };

  class TypeTraitExprBitfields {
    friend class TypeTraitExpr;
    friend class ASTStmtReader;
    friend class ASTStmtWriter;

    unsigned : NumExprBits;

    /// \brief The kind of type trait, which is a value of a TypeTrait enumerator.
    unsigned Kind : 8;

    /// \brief If this expression is not value-dependent, this indicates whether
    /// the trait evaluated true or false.
    unsigned Value : 1;

    /// \brief The number of arguments to this type trait.
    unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
  };

  // Every *Bitfields class above starts with an anonymous bit-field reserving
  // its base's bits, so any union member can be read after sClass is set.
  union {
    StmtBitfields StmtBits;
    CompoundStmtBitfields CompoundStmtBits;
    ExprBitfields ExprBits;
    CharacterLiteralBitfields CharacterLiteralBits;
    FloatingLiteralBitfields FloatingLiteralBits;
    UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
    DeclRefExprBitfields DeclRefExprBits;
    CastExprBitfields CastExprBits;
    CallExprBitfields CallExprBits;
    ExprWithCleanupsBitfields ExprWithCleanupsBits;
    PseudoObjectExprBitfields PseudoObjectExprBits;
    ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
    InitListExprBitfields InitListExprBits;
    TypeTraitExprBitfields TypeTraitExprBits;
  };

  friend class ASTStmtReader;
  friend class ASTStmtWriter;

public:
  // Only allow allocation of Stmts using the allocator in ASTContext
  // or by doing a placement new.
  void* operator new(size_t bytes, const ASTContext& C,
                     unsigned alignment = 8);

  void* operator new(size_t bytes, const ASTContext* C,
                     unsigned alignment = 8) {
    return operator new(bytes, *C, alignment);
  }

  void* operator new(size_t bytes, void* mem) throw() {
    return mem;
  }

  void operator delete(void*, const ASTContext&, unsigned) throw() { }
  void operator delete(void*, const ASTContext*, unsigned) throw() { }
  void operator delete(void*, size_t) throw() { }
  void operator delete(void*, void*) throw() { }

public:
  /// \brief A placeholder type used to construct an empty shell of a
  /// type, that will be filled in later (e.g., by some
  /// de-serialization).
  struct EmptyShell { };

private:
  /// \brief Whether statistic collection is enabled.
  static bool StatisticsEnabled;

protected:
  /// \brief Construct an empty statement.
  explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  Stmt(StmtClass SC) {
    static_assert(sizeof(*this) % llvm::AlignOf<void *>::Alignment == 0,
                  "Insufficient alignment!");
    StmtBits.sClass = SC;
    if (StatisticsEnabled) Stmt::addStmtClass(SC);
  }

  StmtClass getStmtClass() const {
    return static_cast<StmtClass>(StmtBits.sClass);
  }
  const char *getStmtClassName() const;

  /// SourceLocation tokens are not useful in isolation - they are low level
  /// value objects created/interpreted by SourceManager. We assume AST
  /// clients will have a pointer to the respective SourceManager.
  SourceRange getSourceRange() const LLVM_READONLY;
  SourceLocation getLocStart() const LLVM_READONLY;
  SourceLocation getLocEnd() const LLVM_READONLY;

  // global temp stats (until we have a per-module visitor)
  static void addStmtClass(const StmtClass s);
  static void EnableStatistics();
  static void PrintStats();

  /// \brief Dumps the specified AST fragment and all subtrees to
  /// \c llvm::errs().
  void dump() const;
  void dump(SourceManager &SM) const;
  void dump(raw_ostream &OS, SourceManager &SM) const;
  void dump(raw_ostream &OS) const;

  /// dumpColor - same as dump(), but forces color highlighting.
  void dumpColor() const;

  /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
  /// back to its original source language syntax.
  void dumpPretty(const ASTContext &Context) const;
  void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                   const PrintingPolicy &Policy,
                   unsigned Indentation = 0) const;

  /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
  /// works on systems with GraphViz (Mac OS X) or dot+gv installed.
  void viewAST() const;

  /// Skip past any implicit AST nodes which might surround this
  /// statement, such as ExprWithCleanups or ImplicitCastExpr nodes.
  Stmt *IgnoreImplicit();

  /// \brief Skip no-op (attributed, compound) container stmts and skip captured
  /// stmt at the top, if \a IgnoreCaptured is true.
  Stmt *IgnoreContainers(bool IgnoreCaptured = false);

  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    return const_cast<Stmt*>(
      const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpessions of an
  /// AST node. This permits easy iteration over all nodes in the AST.
  typedef StmtIterator child_iterator;
  typedef ConstStmtIterator const_child_iterator;

  typedef StmtRange child_range;
  typedef ConstStmtRange const_child_range;

  child_range children();
  const_child_range children() const {
    return const_cast<Stmt*>(this)->children();
  }

  child_iterator child_begin() { return children().first; }
  child_iterator child_end() { return children().second; }

  const_child_iterator child_begin() const { return children().first; }
  const_child_iterator child_end() const { return children().second; }

  /// \brief Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  ///    the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  ///   representation of this statement (e.g., where non-type template
  ///   parameters are identified by index/level rather than their
  ///   declaration pointers) or the exact representation of the statement as
  ///   written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;
};

/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
///
class DeclStmt : public Stmt {
  DeclGroupRef DG;
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
    : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// \brief Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) { }

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const {
    return DG.isSingleDecl();
  }

  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  SourceLocation getStartLoc() const { return StartLoc; }
  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return StartLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  typedef DeclGroupRef::iterator decl_iterator;
  typedef DeclGroupRef::const_iterator const_decl_iterator;
  typedef llvm::iterator_range<decl_iterator> decl_range;
  typedef llvm::iterator_range<const_decl_iterator> decl_const_range;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }
  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }
  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  typedef std::reverse_iterator<decl_iterator> reverse_decl_iterator;
  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }
  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};

/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
  SourceLocation SemiLoc;

  /// \brief True if the null statement was preceded by an empty macro, e.g:
  /// @code
  ///   #define CALL(x)
  ///   CALL(0);
  /// @endcode
  bool HasLeadingEmptyMacro;
public:
  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
    : Stmt(NullStmtClass), SemiLoc(L),
      HasLeadingEmptyMacro(hasLeadingEmptyMacro) {}

  /// \brief Build an empty null statement.
  explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty),
      HasLeadingEmptyMacro(false) { }

  SourceLocation getSemiLoc() const { return SemiLoc; }
  void setSemiLoc(SourceLocation L) { SemiLoc = L; }

  bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; }

  SourceLocation getLocStart() const LLVM_READONLY { return SemiLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SemiLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  // A null statement has no children.
  child_range children() { return child_range(); }

  friend class ASTStmtReader;
  friend class ASTStmtWriter;
};

/// CompoundStmt - This represents a group of statements like { stmt stmt }.
///
class CompoundStmt : public Stmt {
  Stmt** Body;
  SourceLocation LBraceLoc, RBraceLoc;

  friend class ASTStmtReader;

public:
  CompoundStmt(const ASTContext &C, ArrayRef<Stmt*> Stmts,
               SourceLocation LB, SourceLocation RB);

  // \brief Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
    : Stmt(CompoundStmtClass), Body(nullptr), LBraceLoc(Loc), RBraceLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
  }

  // \brief Build an empty compound statement.
  explicit CompoundStmt(EmptyShell Empty)
    : Stmt(CompoundStmtClass, Empty), Body(nullptr) {
    CompoundStmtBits.NumStmts = 0;
  }

  void setStmts(const ASTContext &C, Stmt **Stmts, unsigned NumStmts);

  // The statement count lives in the shared Stmt bitfield union.
  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  typedef Stmt** body_iterator;
  typedef llvm::iterator_range<body_iterator> body_range;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return Body; }
  body_iterator body_end() { return Body + size(); }
  Stmt *body_front() { return !body_empty() ? Body[0] : nullptr; }
  Stmt *body_back() { return !body_empty() ? Body[size()-1] : nullptr; }

  void setLastStmt(Stmt *S) {
    assert(!body_empty() && "setLastStmt");
    Body[size()-1] = S;
  }

  typedef Stmt* const * const_body_iterator;
  typedef llvm::iterator_range<const_body_iterator> body_const_range;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }
  const_body_iterator body_begin() const { return Body; }
  const_body_iterator body_end() const { return Body + size(); }
  const Stmt *body_front() const {
    return !body_empty() ? Body[0] : nullptr;
  }
  const Stmt *body_back() const {
    return !body_empty() ? Body[size() - 1] : nullptr;
  }

  typedef std::reverse_iterator<body_iterator> reverse_body_iterator;
  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }
  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  typedef std::reverse_iterator<const_body_iterator>
          const_reverse_body_iterator;
  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }
  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  SourceLocation getLocStart() const LLVM_READONLY { return LBraceLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(Body, Body + CompoundStmtBits.NumStmts);
  }

  const_child_range children() const {
    return child_range(Body, Body + CompoundStmtBits.NumStmts);
  }
};

// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
  // A pointer to the following CaseStmt or DefaultStmt class,
  // used by SwitchStmt.
  SwitchCase *NextSwitchCase;
  SourceLocation KeywordLoc;
  SourceLocation ColonLoc;

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
    : Stmt(SC), NextSwitchCase(nullptr), KeywordLoc(KWLoc), ColonLoc(ColonLoc) {
  }

  SwitchCase(StmtClass SC, EmptyShell)
    : Stmt(SC), NextSwitchCase(nullptr) {}

public:
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }

  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }

  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  Stmt *getSubStmt();
  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase*>(this)->getSubStmt();
  }

  SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};

class CaseStmt : public SwitchCase {
  SourceLocation EllipsisLoc;
  enum { LHS, RHS, SUBSTMT, END_EXPR };
  Stmt* SubExprs[END_EXPR];  // The expression for the RHS is Non-null for
                             // GNU "case 1 ... 4" extension
public:
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
    : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    SubExprs[SUBSTMT] = nullptr;
    SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs);
    SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs);
    EllipsisLoc = ellipsisLoc;
  }

  /// \brief Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass, Empty) { }

  SourceLocation getCaseLoc() const { return KeywordLoc; }
  void setCaseLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
  void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); }
  Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); }
  Stmt *getSubStmt() { return SubExprs[SUBSTMT]; }

  const Expr *getLHS() const {
    return reinterpret_cast<const Expr*>(SubExprs[LHS]);
  }
  const Expr *getRHS() const {
    return reinterpret_cast<const Expr*>(SubExprs[RHS]);
  }
  const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; }

  void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; }
  void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); }
  void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); }

  SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of recursion.
    const CaseStmt *CS = this;
    while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;

    return CS->getSubStmt()->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[END_EXPR]);
  }
};

class DefaultStmt : public SwitchCase {
  Stmt* SubStmt;
public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) :
    SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// \brief Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
    : SwitchCase(DefaultStmtClass, Empty) { }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getDefaultLoc() const { return KeywordLoc; }
  void setDefaultLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt+1); }
};

// Defined here, after CaseStmt and DefaultStmt, so both are complete types
// for the dyn_cast/cast below.
inline SourceLocation SwitchCase::getLocEnd() const {
  if (const CaseStmt *CS = dyn_cast<CaseStmt>(this))
    return CS->getLocEnd();
  return cast<DefaultStmt>(this)->getLocEnd();
}

/// LabelStmt - Represents a label, which has a substatement. For example:
///    foo: return;
///
class LabelStmt : public Stmt {
  SourceLocation IdentLoc;
  LabelDecl *TheDecl;
  Stmt *SubStmt;

public:
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : Stmt(LabelStmtClass), IdentLoc(IL), TheDecl(D), SubStmt(substmt) {
    static_assert(sizeof(LabelStmt) ==
                      2 * sizeof(SourceLocation) + 2 * sizeof(void *),
                  "LabelStmt too big");
  }

  // \brief Build an empty label statement.
  explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) { }

  SourceLocation getIdentLoc() const { return IdentLoc; }
  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }
  const char *getName() const;
  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setIdentLoc(SourceLocation L) { IdentLoc = L; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getLocStart() const LLVM_READONLY { return IdentLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}

  child_range children() { return child_range(&SubStmt, &SubStmt+1); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};

/// \brief Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
///
class AttributedStmt : public Stmt {
  Stmt *SubStmt;
  SourceLocation AttrLoc;
  unsigned NumAttrs;

  friend class ASTStmtReader;

  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt)
    : Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc),
      NumAttrs(Attrs.size()) {
    // The Attr* array is allocated immediately after this object; see
    // getAttrArrayPtr().
    memcpy(getAttrArrayPtr(), Attrs.data(), Attrs.size() * sizeof(Attr *));
  }

  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
    : Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) {
    memset(getAttrArrayPtr(), 0, NumAttrs * sizeof(Attr *));
  }

  Attr *const *getAttrArrayPtr() const {
    return reinterpret_cast<Attr *const *>(this + 1);
  }
  Attr **getAttrArrayPtr() { return reinterpret_cast<Attr **>(this + 1); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr*> Attrs, Stmt *SubStmt);

  // \brief Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttrLoc; }
  ArrayRef<const Attr*> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), NumAttrs);
  }
  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getLocStart() const LLVM_READONLY { return AttrLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};

/// IfStmt - This represents an if/then/else.
///
class IfStmt : public Stmt {
  enum { VAR, COND, THEN, ELSE, END_EXPR };
  Stmt* SubExprs[END_EXPR];

  SourceLocation IfLoc;
  SourceLocation ElseLoc;

public:
  IfStmt(const ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond,
         Stmt *then, SourceLocation EL = SourceLocation(),
         Stmt *elsev = nullptr);

  /// \brief Build an empty if/then/else statement
  explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) { }

  /// \brief Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
  const Stmt *getThen() const { return SubExprs[THEN]; }
  void setThen(Stmt *S) { SubExprs[THEN] = S; }
  const Stmt *getElse() const { return SubExprs[ELSE]; }
  void setElse(Stmt *S) { SubExprs[ELSE] = S; }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Stmt *getThen() { return SubExprs[THEN]; }
  Stmt *getElse() { return SubExprs[ELSE]; }

  SourceLocation getIfLoc() const { return IfLoc; }
  void setIfLoc(SourceLocation L) { IfLoc = L; }
  SourceLocation getElseLoc() const { return ElseLoc; }
  void setElseLoc(SourceLocation L) { ElseLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return IfLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    if (SubExprs[ELSE])
      return SubExprs[ELSE]->getLocEnd();
    else
      return SubExprs[THEN]->getLocEnd();
  }

  // Iterators over subexpressions.  The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};

/// SwitchStmt - This represents a 'switch' stmt.
///
class SwitchStmt : public Stmt {
  SourceLocation SwitchLoc;
  enum { VAR, COND, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR];
  // This points to a linked list of case and default statements and, if the
  // SwitchStmt is a switch on an enum value, records whether all the enum
  // values were covered by CaseStmts.  The coverage information value is meant
  // to be a hint for possible clients.
  llvm::PointerIntPair<SwitchCase *, 1, bool> FirstCase;

public:
  SwitchStmt(const ASTContext &C, VarDecl *Var, Expr *cond);

  /// \brief Build a empty switch statement.
  explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) { }

  /// \brief Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  /// case 0: break;
  /// // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Stmt *getBody() const { return SubExprs[BODY]; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase.getPointer(); }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
  Stmt *getBody() { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }
  SwitchCase *getSwitchCaseList() { return FirstCase.getPointer(); }

  /// \brief Set the case list for this switch statement.
  void setSwitchCaseList(SwitchCase *SC) { FirstCase.setPointer(SC); }

  SourceLocation getSwitchLoc() const { return SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchLoc = L; }

  void setBody(Stmt *S, SourceLocation SL) {
    SubExprs[BODY] = S;
    SwitchLoc = SL;
  }

  // Prepends SC to the singly linked case list.
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase.getPointer());
    FirstCase.setPointer(SC);
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { FirstCase.setInt(true); }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const { return FirstCase.getInt(); }

  SourceLocation getLocStart() const LLVM_READONLY { return SwitchLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubExprs[BODY] ? SubExprs[BODY]->getLocEnd() : SubExprs[COND]->getLocEnd();
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};

/// WhileStmt - This represents a 'while' stmt.
///
class WhileStmt : public Stmt {
  SourceLocation WhileLoc;
  enum { VAR, COND, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR];
public:
  WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
            SourceLocation WL);

  /// \brief Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) { }

  /// \brief Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return WhileLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubExprs[BODY]->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};

/// DoStmt - This represents a 'do/while' stmt.
///
class DoStmt : public Stmt {
  SourceLocation DoLoc;
  enum { BODY, COND, END_EXPR };
  Stmt* SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc;  // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
    : Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) {
    SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
    SubExprs[BODY] = body;
  }

  /// \brief Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) { }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getDoLoc() const { return DoLoc; }
  void setDoLoc(SourceLocation L) { DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return DoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};

/// ForStmt - This represents a 'for (init;cond;inc)' stmt.  Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
///
class ForStmt : public Stmt {
  SourceLocation ForLoc;
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// \brief Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) { }

  Stmt *getInit() { return SubExprs[INIT]; }

  /// \brief Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Expr *getInc()  { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }

  const Stmt *getInit() const { return SubExprs[INIT]; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Expr *getInc()  const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  const Stmt *getBody() const { return SubExprs[BODY]; }

  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getForLoc() const { return ForLoc; }
  void setForLoc(SourceLocation L) { ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return ForLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubExprs[BODY]->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};

/// GotoStmt - This represents a direct goto.
///
class GotoStmt : public Stmt {
  LabelDecl *Label;        // The label being jumped to.
  SourceLocation GotoLoc;
  SourceLocation LabelLoc;
public:
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
    : Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {}

  /// \brief Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) { }

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  SourceLocation getGotoLoc() const { return GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return LabelLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators (a direct goto has no sub-statements).
  child_range children() { return child_range(); }
};

/// IndirectGotoStmt - This represents an indirect goto.
///
class IndirectGotoStmt : public Stmt {
  SourceLocation GotoLoc;
  SourceLocation StarLoc;
  Stmt *Target;            // The target address expression (an Expr).
public:
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc,
                   Expr *target)
    : Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc),
      Target((Stmt*)target) {}

  /// \brief Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
    : Stmt(IndirectGotoStmtClass, Empty) { }

  void setGotoLoc(SourceLocation L) { GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  Expr *getTarget() { return reinterpret_cast<Expr*>(Target); }
  const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);}
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt*>(this)->getConstantTarget();
  }

  SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return Target->getLocEnd(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&Target, &Target+1); }
};

/// ContinueStmt - This represents a continue.
///
class ContinueStmt : public Stmt {
  SourceLocation ContinueLoc;
public:
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {}

  /// \brief Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) { }

  SourceLocation getContinueLoc() const { return ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return ContinueLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return ContinueLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators (no sub-statements).
  child_range children() { return child_range(); }
};

/// BreakStmt - This represents a break.
///
class BreakStmt : public Stmt {
  SourceLocation BreakLoc;
public:
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) {
    // Guard against accidental growth: a BreakStmt should carry no state
    // beyond the Stmt header plus one location.
    static_assert(sizeof(BreakStmt) == 2 * sizeof(SourceLocation),
                  "BreakStmt too large");
  }

  /// \brief Build an empty break statement.
explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) { }

  SourceLocation getBreakLoc() const { return BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return BreakLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return BreakLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators (no sub-statements).
  child_range children() { return child_range(); }
};

/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void.  We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
///
class ReturnStmt : public Stmt {
  SourceLocation RetLoc;
  Stmt *RetExpr;                  // May be null for a bare 'return;'.
  const VarDecl *NRVOCandidate;
public:
  explicit ReturnStmt(SourceLocation RL) : ReturnStmt(RL, nullptr, nullptr) {}

  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate)
      : Stmt(ReturnStmtClass), RetLoc(RL), RetExpr((Stmt *)E),
        NRVOCandidate(NRVOCandidate) {}

  /// \brief Build an empty return expression.
  explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) { }

  const Expr *getRetValue() const;
  Expr *getRetValue();
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); }

  SourceLocation getReturnLoc() const { return RetLoc; }
  void setReturnLoc(SourceLocation L) { RetLoc = L; }

  /// \brief Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const { return NRVOCandidate; }
  void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; }

  SourceLocation getLocStart() const LLVM_READONLY { return RetLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return RetExpr ? RetExpr->getLocEnd() : RetLoc;
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators: iterate over the return value only when one is present.
  child_range children() {
    if (RetExpr) return child_range(&RetExpr, &RetExpr+1);
    return child_range();
  }
};

/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
///
class AsmStmt : public Stmt {
protected:
  SourceLocation AsmLoc;

  /// \brief True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;

  /// \brief If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;

  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;

  // Operand expressions: outputs first, then inputs (see the iterator
  // accessors in the derived-class section below).
  Stmt **Exprs;

  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
    : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
      NumOutputs(numoutputs), NumInputs(numinputs),
      NumClobbers(numclobbers) { }

  friend class ASTStmtReader;

public:
  /// \brief Build an empty inline-assembly statement.
  explicit AsmStmt(StmtClass SC, EmptyShell Empty) :
    Stmt(SC, Empty), Exprs(nullptr) { }

  SourceLocation getAsmLoc() const { return AsmLoc; }
  void setAsmLoc(SourceLocation L) { AsmLoc = L; }

  bool isSimple() const { return IsSimple; }
  void setSimple(bool V) { IsSimple = V; }

  bool isVolatile() const { return IsVolatile; }
  void setVolatile(bool V) { IsVolatile = V; }

  SourceLocation getLocStart() const LLVM_READONLY { return SourceLocation(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return SourceLocation(); }

  //===--- Asm String Analysis ---===//

  /// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  unsigned getNumOutputs() const { return NumOutputs; }

  /// getOutputConstraint - Return the constraint string for the specified
  /// output operand.  All output constraints are known to be non-empty (either
  /// '=' or '+').
  StringRef getOutputConstraint(unsigned i) const;

  /// isOutputPlusConstraint - Return true if the specified output constraint
  /// is a "+" constraint (which is both an input and an output) or false if it
  /// is an "=" constraint (just an output).
  bool isOutputPlusConstraint(unsigned i) const {
    return getOutputConstraint(i)[0] == '+';
  }

  const Expr *getOutputExpr(unsigned i) const;

  /// getNumPlusOperands - Return the number of output operands that have a "+"
  /// constraint.
  unsigned getNumPlusOperands() const;

  //===--- Input operands ---===//

  unsigned getNumInputs() const { return NumInputs; }

  /// getInputConstraint - Return the specified input constraint.  Unlike output
  /// constraints, these can be empty.
  StringRef getInputConstraint(unsigned i) const;

  const Expr *getInputExpr(unsigned i) const;

  //===--- Other ---===//

  unsigned getNumClobbers() const { return NumClobbers; }
  StringRef getClobber(unsigned i) const;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass ||
      T->getStmtClass() == MSAsmStmtClass;
  }

  // Input expr iterators.
  // Exprs stores outputs first then inputs, hence the NumOutputs offsets.

  typedef ExprIterator inputs_iterator;
  typedef ConstExprIterator const_inputs_iterator;
  typedef llvm::iterator_range<inputs_iterator> inputs_range;
  typedef llvm::iterator_range<const_inputs_iterator> inputs_const_range;

  inputs_iterator begin_inputs() {
    return &Exprs[0] + NumOutputs;
  }

  inputs_iterator end_inputs() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }

  const_inputs_iterator begin_inputs() const {
    return &Exprs[0] + NumOutputs;
  }

  const_inputs_iterator end_inputs() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_const_range inputs() const {
    return inputs_const_range(begin_inputs(), end_inputs());
  }

  // Output expr iterators.

  typedef ExprIterator outputs_iterator;
  typedef ConstExprIterator const_outputs_iterator;
  typedef llvm::iterator_range<outputs_iterator> outputs_range;
  typedef llvm::iterator_range<const_outputs_iterator> outputs_const_range;

  outputs_iterator begin_outputs() {
    return &Exprs[0];
  }

  outputs_iterator end_outputs() {
    return &Exprs[0] + NumOutputs;
  }

  outputs_range outputs() {
    return outputs_range(begin_outputs(), end_outputs());
  }

  const_outputs_iterator begin_outputs() const {
    return &Exprs[0];
  }

  const_outputs_iterator end_outputs() const {
    return &Exprs[0] + NumOutputs;
  }

  outputs_const_range outputs() const {
    return outputs_const_range(begin_outputs(), end_outputs());
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
};

/// This represents a GCC inline-assembly statement extension.
///
class GCCAsmStmt : public AsmStmt {
  SourceLocation RParenLoc;
  StringLiteral *AsmStr;

  // FIXME: If we wanted to, we could allocate all of these in one big array.
StringLiteral **Constraints;   // One constraint literal per operand.
  StringLiteral **Clobbers;      // One clobber literal per clobber.
  IdentifierInfo **Names;        // Symbolic operand names; may contain nulls.

  friend class ASTStmtReader;

public:
  GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
             bool isvolatile, unsigned numoutputs, unsigned numinputs,
             IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
             StringLiteral *asmstr, unsigned numclobbers,
             StringLiteral **clobbers, SourceLocation rparenloc);

  /// \brief Build an empty inline-assembly statement.
  explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty),
    Constraints(nullptr), Clobbers(nullptr), Names(nullptr) { }

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  //===--- Asm String Analysis ---===//

  const StringLiteral *getAsmString() const { return AsmStr; }
  StringLiteral *getAsmString() { return AsmStr; }
  void setAsmString(StringLiteral *E) { AsmStr = E; }

  /// AsmStringPiece - this is part of a decomposed asm string specification
  /// (for use with the AnalyzeAsmString function below).  An asm string is
  /// considered to be a concatenation of these parts.
  class AsmStringPiece {
  public:
    enum Kind {
      String,  // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
      Operand  // Operand reference, with optional modifier %c4.
    };
  private:
    Kind MyKind;
    std::string Str;
    unsigned OperandNo;   // Only meaningful when MyKind == Operand.

    // Source range for operand references.
    CharSourceRange Range;
  public:
    AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
    AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
                   SourceLocation End)
      : MyKind(Operand), Str(S), OperandNo(OpNo),
        Range(CharSourceRange::getCharRange(Begin, End)) {
    }

    bool isString() const { return MyKind == String; }
    bool isOperand() const { return MyKind == Operand; }

    const std::string &getString() const { return Str; }

    unsigned getOperandNo() const {
      assert(isOperand());
      return OperandNo;
    }

    CharSourceRange getRange() const {
      assert(isOperand() && "Range is currently used only for Operands.");
      return Range;
    }

    /// getModifier - Get the modifier for this operand, if present.  This
    /// returns '\0' if there was no modifier.
    char getModifier() const;
  };

  /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
  /// it into pieces.  If the asm string is erroneous, emit errors and return
  /// true, otherwise return false.  This handles canonicalization and
  /// translation of strings from GCC syntax to LLVM IR syntax, and handles
  /// flattening of named references like %[foo] to Operand AsmStringPiece's.
  unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                            const ASTContext &C, unsigned &DiagOffs) const;

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  IdentifierInfo *getOutputIdentifier(unsigned i) const {
    return Names[i];
  }

  StringRef getOutputName(unsigned i) const {
    if (IdentifierInfo *II = getOutputIdentifier(i))
      return II->getName();

    return StringRef();
  }

  StringRef getOutputConstraint(unsigned i) const;

  const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
    return Constraints[i];
  }
  StringLiteral *getOutputConstraintLiteral(unsigned i) {
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//
  // Inputs are stored after the outputs, hence the NumOutputs offsets below.

  IdentifierInfo *getInputIdentifier(unsigned i) const {
    return Names[i + NumOutputs];
  }

  StringRef getInputName(unsigned i) const {
    if (IdentifierInfo *II = getInputIdentifier(i))
      return II->getName();

    return StringRef();
  }

  StringRef getInputConstraint(unsigned i) const;

  const StringLiteral *getInputConstraintLiteral(unsigned i) const {
    return Constraints[i + NumOutputs];
  }
  StringLiteral *getInputConstraintLiteral(unsigned i) {
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
  }

private:
  void setOutputsAndInputsAndClobbers(const ASTContext &C,
                                      IdentifierInfo **Names,
                                      StringLiteral **Constraints,
                                      Stmt **Exprs,
                                      unsigned NumOutputs,
                                      unsigned NumInputs,
                                      StringLiteral **Clobbers,
                                      unsigned NumClobbers);
public:

  //===--- Other ---===//

  /// getNamedOperand - Given a symbolic operand reference like %[foo],
  /// translate this into a numeric value needed to reference the same operand.
  /// This returns -1 if the operand name is invalid.
int getNamedOperand(StringRef SymbolicName) const;

  StringRef getClobber(unsigned i) const;
  StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
  const StringLiteral *getClobberStringLiteral(unsigned i) const {
    return Clobbers[i];
  }

  SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass;
  }
};

/// This represents a Microsoft inline-assembly statement extension.
///
class MSAsmStmt : public AsmStmt {
  SourceLocation LBraceLoc, EndLoc;
  StringRef AsmStr;

  unsigned NumAsmToks;   // Number of tokens in AsmToks.

  Token *AsmToks;
  StringRef *Constraints;
  StringRef *Clobbers;

  friend class ASTStmtReader;

public:
  MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
            SourceLocation lbraceloc, bool issimple, bool isvolatile,
            ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
            ArrayRef<StringRef> constraints,
            ArrayRef<Expr*> exprs, StringRef asmstr,
            ArrayRef<StringRef> clobbers, SourceLocation endloc);

  /// \brief Build an empty MS-style inline-assembly statement.
  explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty),
    NumAsmToks(0), AsmToks(nullptr), Constraints(nullptr), Clobbers(nullptr) { }

  SourceLocation getLBraceLoc() const { return LBraceLoc; }
  void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  bool hasBraces() const { return LBraceLoc.isValid(); }

  unsigned getNumAsmToks() { return NumAsmToks; }
  Token *getAsmToks() { return AsmToks; }

  //===--- Asm String Analysis ---===//
  StringRef getAsmString() const { return AsmStr; }

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  StringRef getOutputConstraint(unsigned i) const {
    assert(i < NumOutputs);
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//
  // Inputs are stored after the outputs, hence the NumOutputs offset.

  StringRef getInputConstraint(unsigned i) const {
    assert(i < NumInputs);
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Other ---===//

  ArrayRef<StringRef> getAllConstraints() const {
    return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
  }
  ArrayRef<StringRef> getClobbers() const {
    return llvm::makeArrayRef(Clobbers, NumClobbers);
  }
  ArrayRef<Expr*> getAllExprs() const {
    return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
                              NumInputs + NumOutputs);
  }

  StringRef getClobber(unsigned i) const { return getClobbers()[i]; }

private:
  void initialize(const ASTContext &C, StringRef AsmString,
                  ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
                  ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);
public:

  SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == MSAsmStmtClass;
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }
};

// Represents a __except block of a SEH __try statement.
class SEHExceptStmt : public Stmt {
  SourceLocation  Loc;
  Stmt           *Children[2];   // Filter expression and handler block.

  enum { FILTER_EXPR, BLOCK };

  SEHExceptStmt(SourceLocation Loc,
                Expr *FilterExpr,
                Stmt *Block);

  friend class ASTReader;
  friend class ASTStmtReader;
  explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) { }

public:
  static SEHExceptStmt* Create(const ASTContext &C,
                               SourceLocation ExceptLoc,
                               Expr *FilterExpr,
                               Stmt *Block);

  SourceLocation
getLocStart() const LLVM_READONLY { return getExceptLoc(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }

  SourceLocation getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); }

  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
  }

  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }

  child_range children() {
    return child_range(Children,Children+2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};

// Represents a __finally block of a SEH __try statement.
class SEHFinallyStmt : public Stmt {
  SourceLocation  Loc;
  Stmt           *Block;

  SEHFinallyStmt(SourceLocation Loc,
                 Stmt *Block);

  friend class ASTReader;
  friend class ASTStmtReader;
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) { }

public:
  static SEHFinallyStmt* Create(const ASTContext &C,
                                SourceLocation FinallyLoc,
                                Stmt *Block);

  SourceLocation getLocStart() const LLVM_READONLY { return getFinallyLoc(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }

  SourceLocation getFinallyLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return Block->getLocEnd(); }

  CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }

  child_range children() {
    return child_range(&Block,&Block+1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHFinallyStmtClass;
  }
};

// Represents a SEH or C++ try statement with its handler.
class SEHTryStmt : public Stmt {
  bool            IsCXXTry;
  SourceLocation  TryLoc;
  Stmt           *Children[2];   // Try block and handler.

  enum { TRY = 0, HANDLER = 1 };

  SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
             SourceLocation TryLoc,
             Stmt *TryBlock,
             Stmt *Handler);

  friend class ASTReader;
  friend class ASTStmtReader;
  explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) { }

public:
  static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);

  SourceLocation getLocStart() const LLVM_READONLY { return getTryLoc(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }

  SourceLocation getTryLoc() const { return TryLoc; }
  SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); }

  bool getIsCXXTry() const { return IsCXXTry; }

  CompoundStmt* getTryBlock() const {
    return cast<CompoundStmt>(Children[TRY]);
  }

  Stmt *getHandler() const { return Children[HANDLER]; }

  /// Returns 0 if not defined
  SEHExceptStmt  *getExceptHandler() const;
  SEHFinallyStmt *getFinallyHandler() const;

  child_range children() {
    return child_range(Children,Children+2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHTryStmtClass;
  }
};

/// Represents a __leave statement.
///
class SEHLeaveStmt : public Stmt {
  SourceLocation LeaveLoc;
public:
  explicit SEHLeaveStmt(SourceLocation LL)
      : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}

  /// \brief Build an empty __leave statement.
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) { }

  SourceLocation getLeaveLoc() const { return LeaveLoc; }
  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return LeaveLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return LeaveLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }

  // Iterators (no sub-statements).
  child_range children() { return child_range(); }
};

/// \brief This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
///   compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
  /// \brief The different capture forms: by 'this', by reference, capture for
  /// variable-length array type etc.
  enum VariableCaptureKind {
    VCK_This,
    VCK_ByRef,
    VCK_VLAType,
  };

  /// \brief Describes the capture of either a variable, or 'this', or
  /// variable-length array type.
class Capture {
    llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
    SourceLocation Loc;

  public:
    /// \brief Create a new capture.
    ///
    /// \param Loc The source location associated with this capture.
    ///
    /// \param Kind The kind of capture (this, ByRef, ...).
    ///
    /// \param Var The variable being captured, or null if capturing this.
    ///
    Capture(SourceLocation Loc, VariableCaptureKind Kind,
            VarDecl *Var = nullptr)
      : VarAndKind(Var, Kind), Loc(Loc) {
      // Enforce the invariant that only by-reference captures carry a
      // variable.
      switch (Kind) {
      case VCK_This:
        assert(!Var && "'this' capture cannot have a variable!");
        break;
      case VCK_ByRef:
        assert(Var && "capturing by reference must have a variable!");
        break;
      case VCK_VLAType:
        assert(!Var &&
               "Variable-length array type capture cannot have a variable!");
        break;
      }
    }

    /// \brief Determine the kind of capture.
    VariableCaptureKind getCaptureKind() const { return VarAndKind.getInt(); }

    /// \brief Retrieve the source location at which the variable or 'this' was
    /// first used.
    SourceLocation getLocation() const { return Loc; }

    /// \brief Determine whether this capture handles the C++ 'this' pointer.
    bool capturesThis() const { return getCaptureKind() == VCK_This; }

    /// \brief Determine whether this capture handles a variable.
    bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }

    /// \brief Determine whether this capture handles a variable-length array
    /// type.
    bool capturesVariableArrayType() const {
      return getCaptureKind() == VCK_VLAType;
    }

    /// \brief Retrieve the declaration of the variable being captured.
    ///
    /// This operation is only valid if this capture captures a variable.
    VarDecl *getCapturedVar() const {
      assert(capturesVariable() &&
             "No variable available for 'this' or VAT capture");
      return VarAndKind.getPointer();
    }
    friend class ASTStmtReader;
  };

private:
  /// \brief The number of variable captured, including 'this'.
  unsigned NumCaptures;

  /// \brief The pointer part is the implicitly-generated outlined function
  /// and the int part is the captured region kind, 'CR_Default' etc.
  llvm::PointerIntPair<CapturedDecl *, 1, CapturedRegionKind> CapDeclAndKind;

  /// \brief The record for captured variables, a RecordDecl or CXXRecordDecl.
  RecordDecl *TheRecordDecl;

  /// \brief Construct a captured statement.
  CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
               ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);

  /// \brief Construct an empty captured statement.
  CapturedStmt(EmptyShell Empty, unsigned NumCaptures);

  // Trailing storage: the capture-initialization expressions followed by the
  // captured statement itself are laid out directly after this object.
  Stmt **getStoredStmts() const {
    return reinterpret_cast<Stmt **>(const_cast<CapturedStmt *>(this) + 1);
  }

  Capture *getStoredCaptures() const;

  void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }

public:
  static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
                              CapturedRegionKind Kind,
                              ArrayRef<Capture> Captures,
                              ArrayRef<Expr *> CaptureInits,
                              CapturedDecl *CD, RecordDecl *RD);

  static CapturedStmt *CreateDeserialized(const ASTContext &Context,
                                          unsigned NumCaptures);

  /// \brief Retrieve the statement being captured.
  Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
  const Stmt *getCapturedStmt() const {
    return const_cast<CapturedStmt *>(this)->getCapturedStmt();
  }

  /// \brief Retrieve the outlined function declaration.
  CapturedDecl *getCapturedDecl() { return CapDeclAndKind.getPointer(); }
  const CapturedDecl *getCapturedDecl() const {
    return const_cast<CapturedStmt *>(this)->getCapturedDecl();
  }

  /// \brief Set the outlined function declaration.
  void setCapturedDecl(CapturedDecl *D) {
    assert(D && "null CapturedDecl");
    CapDeclAndKind.setPointer(D);
  }

  /// \brief Retrieve the captured region kind.
  CapturedRegionKind getCapturedRegionKind() const {
    return CapDeclAndKind.getInt();
  }

  /// \brief Set the captured region kind.
  void setCapturedRegionKind(CapturedRegionKind Kind) {
    CapDeclAndKind.setInt(Kind);
  }

  /// \brief Retrieve the record declaration for captured variables.
  const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }

  /// \brief Set the record declaration for captured variables.
  void setCapturedRecordDecl(RecordDecl *D) {
    assert(D && "null RecordDecl");
    TheRecordDecl = D;
  }

  /// \brief True if this variable has been captured.
  bool capturesVariable(const VarDecl *Var) const;

  /// \brief An iterator that walks over the captures.
  typedef Capture *capture_iterator;
  typedef const Capture *const_capture_iterator;
  typedef llvm::iterator_range<capture_iterator> capture_range;
  typedef llvm::iterator_range<const_capture_iterator> capture_const_range;

  capture_range captures() {
    return capture_range(capture_begin(), capture_end());
  }
  capture_const_range captures() const {
    return capture_const_range(capture_begin(), capture_end());
  }

  /// \brief Retrieve an iterator pointing to the first capture.
  capture_iterator capture_begin() { return getStoredCaptures(); }
  const_capture_iterator capture_begin() const { return getStoredCaptures(); }

  /// \brief Retrieve an iterator pointing past the end of the sequence of
  /// captures.
  capture_iterator capture_end() const {
    return getStoredCaptures() + NumCaptures;
  }

  /// \brief Retrieve the number of captures, including 'this'.
  unsigned capture_size() const { return NumCaptures; }

  /// \brief Iterator that walks over the capture initialization arguments.
  typedef Expr **capture_init_iterator;
  typedef llvm::iterator_range<capture_init_iterator> capture_init_range;

  capture_init_range capture_inits() const {
    return capture_init_range(capture_init_begin(), capture_init_end());
  }

  /// \brief Retrieve the first initialization argument.
  capture_init_iterator capture_init_begin() const {
    return reinterpret_cast<Expr **>(getStoredStmts());
  }

  /// \brief Retrieve the iterator pointing one past the last initialization
  /// argument.
  capture_init_iterator capture_init_end() const {
    return capture_init_begin() + NumCaptures;
  }

  SourceLocation getLocStart() const LLVM_READONLY {
    return getCapturedStmt()->getLocStart();
  }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return getCapturedStmt()->getLocEnd();
  }
  SourceRange getSourceRange() const LLVM_READONLY {
    return getCapturedStmt()->getSourceRange();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CapturedStmtClass;
  }

  child_range children();

  friend class ASTStmtReader;
};

}  // end namespace clang

#endif
// ===== GeneralMatrixMatrix.h (concatenated file boundary; original marker was a bare filename) =====
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_GENERAL_MATRIX_MATRIX_H #define EIGEN_GENERAL_MATRIX_MATRIX_H namespace Eigen { namespace internal { template<typename _LhsScalar, typename _RhsScalar> class level3_blocking; /* Specialization for a row-major destination matrix => simple transposition of the product */ template< typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs> struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor> { typedef gebp_traits<RhsScalar,LhsScalar> Traits; typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; static EIGEN_STRONG_INLINE void run( Index rows, Index cols, Index depth, const LhsScalar* lhs, Index lhsStride, const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar alpha, level3_blocking<RhsScalar,LhsScalar>& blocking, GemmParallelInfo<Index>* info = 0) { // transpose the product such that the result is column major general_matrix_matrix_product<Index, RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs, LhsScalar, LhsStorageOrder==RowMajor ? 
ColMajor : RowMajor, ConjugateLhs, ColMajor> ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info); } }; /* Specialization for a col-major destination matrix * => Blocking algorithm following Goto's paper */ template< typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs> struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor> { typedef gebp_traits<LhsScalar,RhsScalar> Traits; typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; static void run(Index rows, Index cols, Index depth, const LhsScalar* _lhs, Index lhsStride, const RhsScalar* _rhs, Index rhsStride, ResScalar* _res, Index resStride, ResScalar alpha, level3_blocking<LhsScalar,RhsScalar>& blocking, GemmParallelInfo<Index>* info = 0) { typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper; typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper; typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper; LhsMapper lhs(_lhs,lhsStride); RhsMapper rhs(_rhs,rhsStride); ResMapper res(_res, resStride); Index kc = blocking.kc(); // cache block size along the K direction Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction Index nc = (std::min)(cols,blocking.nc()); // cache block size along the N direction gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs; gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs; gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp; #ifdef EIGEN_HAS_OPENMP if(info) { // this is the parallel version! 
Index tid = omp_get_thread_num(); Index threads = omp_get_num_threads(); LhsScalar* blockA = blocking.blockA(); eigen_internal_assert(blockA!=0); std::size_t sizeB = kc*nc; ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0); // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs... for(Index k=0; k<depth; k+=kc) { const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A' // In order to reduce the chance that a thread has to wait for the other, // let's start by packing B'. pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc); // Pack A_k to A' in a parallel fashion: // each thread packs the sub block A_k,i to A'_i where i is the thread id. // However, before copying to A'_i, we have to make sure that no other thread is still using it, // i.e., we test that info[tid].users equals 0. // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it. while(info[tid].users!=0) {} info[tid].users += threads; pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length); // Notify the other threads that the part A'_i is ready to go. info[tid].sync = k; // Computes C_i += A' * B' per A'_i for(Index shift=0; shift<threads; ++shift) { Index i = (tid+shift)%threads; // At this point we have to make sure that A'_i has been updated by the thread i, // we use testAndSetOrdered to mimic a volatile access. // However, no need to wait for the B' part which has been updated by the current thread! 
if (shift>0) { while(info[i].sync!=k) { } } gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha); } // Then keep going as usual with the remaining B' for(Index j=nc; j<cols; j+=nc) { const Index actual_nc = (std::min)(j+nc,cols)-j; // pack B_k,j to B' pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc); // C_j += A' * B' gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha); } // Release all the sub blocks A'_i of A' for the current thread, // i.e., we simply decrement the number of users by 1 for(Index i=0; i<threads; ++i) #pragma omp atomic info[i].users -= 1; } } else #endif // EIGEN_HAS_OPENMP { EIGEN_UNUSED_VARIABLE(info); // this is the sequential version! std::size_t sizeA = kc*mc; std::size_t sizeB = kc*nc; ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA()); ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB()); const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols; // For each horizontal panel of the rhs, and corresponding panel of the lhs... for(Index i2=0; i2<rows; i2+=mc) { const Index actual_mc = (std::min)(i2+mc,rows)-i2; for(Index k2=0; k2<depth; k2+=kc) { const Index actual_kc = (std::min)(k2+kc,depth)-k2; // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs. // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching) // Note that this panel will be read as many times as the number of blocks in the rhs's // horizontal panel which is, in practice, a very low number. pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc); // For each kc x nc block of the rhs's horizontal panel... 
for(Index j2=0; j2<cols; j2+=nc) { const Index actual_nc = (std::min)(j2+nc,cols)-j2; // We pack the rhs's block into a sequential chunk of memory (L2 caching) // Note that this block will be read a very high number of times, which is equal to the number of // micro horizontal panel of the large rhs's panel (e.g., rows/12 times). if((!pack_rhs_once) || i2==0) pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc); // Everything is packed, we can now call the panel * block kernel: gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha); } } } } } }; /********************************************************************************* * Specialization of generic_product_impl for "large" GEMM, i.e., * implementation of the high level wrapper to general_matrix_matrix_product **********************************************************************************/ template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType> struct gemm_functor { gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking) : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking) {} void initParallelSession(Index num_threads) const { m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads); m_blocking.allocateA(); } void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const { if(cols==-1) cols = m_rhs.cols(); Gemm::run(rows, cols, m_lhs.cols(), &m_lhs.coeffRef(row,0), m_lhs.outerStride(), &m_rhs.coeffRef(0,col), m_rhs.outerStride(), (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(), m_actualAlpha, m_blocking, info); } typedef typename Gemm::Traits Traits; protected: const Lhs& m_lhs; const Rhs& m_rhs; Dest& m_dest; Scalar m_actualAlpha; BlockingType& m_blocking; }; template<int StorageOrder, typename LhsScalar, typename RhsScalar, int 
MaxRows, int MaxCols, int MaxDepth, int KcFactor=1, bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space; template<typename _LhsScalar, typename _RhsScalar> class level3_blocking { typedef _LhsScalar LhsScalar; typedef _RhsScalar RhsScalar; protected: LhsScalar* m_blockA; RhsScalar* m_blockB; Index m_mc; Index m_nc; Index m_kc; public: level3_blocking() : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0) {} inline Index mc() const { return m_mc; } inline Index nc() const { return m_nc; } inline Index kc() const { return m_kc; } inline LhsScalar* blockA() { return m_blockA; } inline RhsScalar* blockB() { return m_blockB; } }; template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor> class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */> : public level3_blocking< typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type, typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type> { enum { Transpose = StorageOrder==RowMajor, ActualRows = Transpose ? MaxCols : MaxRows, ActualCols = Transpose ? 
MaxRows : MaxCols }; typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar; typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar; typedef gebp_traits<LhsScalar,RhsScalar> Traits; enum { SizeA = ActualRows * MaxDepth, SizeB = ActualCols * MaxDepth }; #if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA]; EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB]; #else EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1]; EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1]; #endif public: gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/) { this->m_mc = ActualRows; this->m_nc = ActualCols; this->m_kc = MaxDepth; #if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES this->m_blockA = m_staticA; this->m_blockB = m_staticB; #else this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1)); this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1)); #endif } void initParallel(Index, Index, Index, Index) {} inline void allocateA() {} inline void allocateB() {} inline void allocateAll() {} }; template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor> class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false> : public level3_blocking< typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type, typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type> { enum { Transpose = StorageOrder==RowMajor }; typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar; typedef typename 
conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar; typedef gebp_traits<LhsScalar,RhsScalar> Traits; Index m_sizeA; Index m_sizeB; public: gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking) { this->m_mc = Transpose ? cols : rows; this->m_nc = Transpose ? rows : cols; this->m_kc = depth; if(l3_blocking) { computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads); } else // no l3 blocking { Index n = this->m_nc; computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads); } m_sizeA = this->m_mc * this->m_kc; m_sizeB = this->m_kc * this->m_nc; } void initParallel(Index rows, Index cols, Index depth, Index num_threads) { this->m_mc = Transpose ? cols : rows; this->m_nc = Transpose ? rows : cols; this->m_kc = depth; eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0); Index m = this->m_mc; computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads); m_sizeA = this->m_mc * this->m_kc; m_sizeB = this->m_kc * this->m_nc; } void allocateA() { if(this->m_blockA==0) this->m_blockA = aligned_new<LhsScalar>(m_sizeA); } void allocateB() { if(this->m_blockB==0) this->m_blockB = aligned_new<RhsScalar>(m_sizeB); } void allocateAll() { allocateA(); allocateB(); } ~gemm_blocking_space() { aligned_delete(this->m_blockA, m_sizeA); aligned_delete(this->m_blockB, m_sizeB); } }; } // end namespace internal namespace internal { template<typename Lhs, typename Rhs> struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> > { typedef typename Product<Lhs,Rhs>::Scalar Scalar; typedef typename Lhs::Scalar LhsScalar; typedef typename Rhs::Scalar RhsScalar; typedef internal::blas_traits<Lhs> LhsBlasTraits; typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; typedef typename 
internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned; typedef internal::blas_traits<Rhs> RhsBlasTraits; typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType; typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned; enum { MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime) }; typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct; template<typename Dst> static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0) lazyproduct::evalTo(dst, lhs, rhs); else { dst.setZero(); scaleAndAddTo(dst, lhs, rhs, Scalar(1)); } } template<typename Dst> static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0) lazyproduct::addTo(dst, lhs, rhs); else scaleAndAddTo(dst,lhs, rhs, Scalar(1)); } template<typename Dst> static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0) lazyproduct::subTo(dst, lhs, rhs); else scaleAndAddTo(dst, lhs, rhs, Scalar(-1)); } template<typename Dest> static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha) { eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols()); if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0) return; typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs); typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs); Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs) * RhsBlasTraits::extractScalarFactor(a_rhs); typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? 
RowMajor : ColMajor,LhsScalar,RhsScalar, Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType; typedef internal::gemm_functor< Scalar, Index, internal::general_matrix_matrix_product< Index, LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate), RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate), (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>, ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor; BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true); internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)> (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit); } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_GENERAL_MATRIX_MATRIX_H
7633.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "atax.h" /* Array initialization. */ static void init_array (int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny)) { int i, j; for (i = 0; i < ny; i++) x[i] = i * M_PI; for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) A[i][j] = ((DATA_TYPE) i*(j+1)) / nx; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int nx, DATA_TYPE POLYBENCH_1D(y,NX,nx)) { int i; for (i = 0; i < nx; i++) { fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]); if (i % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_atax(int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny), DATA_TYPE POLYBENCH_1D(y,NY,ny), DATA_TYPE POLYBENCH_1D(tmp,NX,nx)) { int i, j; #pragma scop #pragma omp parallel { #pragma omp parallel for schedule(static, 8) num_threads(4) for (i = 0; i < _PB_NY; i++) y[i] = 0; #pragma omp parallel for private (j) schedule(static, 8) num_threads(4) for (i = 0; i < _PB_NX; i++) { tmp[i] = 0; for (j = 0; j < _PB_NY; j++) tmp[i] = tmp[i] + A[i][j] * x[j]; for (j = 0; j < _PB_NY; j++) y[j] = y[j] + A[i][j] * tmp[i]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int nx = NX; int ny = NY; /* Variable declaration/allocation. 
*/ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny); POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx); /* Initialize array(s). */ init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_atax (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(tmp)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(x); POLYBENCH_FREE_ARRAY(y); POLYBENCH_FREE_ARRAY(tmp); return 0; }
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 16; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 
Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,3);t1++) { lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6)); ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(0,ceild(3*t1-3*t2,2)),ceild(3*t1-2,4)),ceild(24*t2-Nz-3,16));t3<=min(min(min(floord(4*Nt+Ny-9,16),floord(12*t1+Ny+15,16)),floord(24*t2+Ny+11,16)),floord(24*t1-24*t2+Nz+Ny+13,16));t3++) { for (t4=max(max(max(max(0,ceild(3*t1-3*t2-126,128)),ceild(3*t1-254,256)),ceild(24*t2-Nz-1011,1024)),ceild(16*t3-Ny-1011,1024));t4<=min(min(min(min(floord(4*Nt+Nx-9,1024),floord(12*t1+Nx+15,1024)),floord(24*t2+Nx+11,1024)),floord(16*t3+Nx+3,1024)),floord(24*t1-24*t2+Nz+Nx+13,1024));t4++) { for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(1024*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),4*t3+2),256*t4+254);t5++) { for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) { lbv=max(1024*t4,4*t5+4); ubv=min(1024*t4+1023,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ 
(-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free 
allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
rose_scalar_privatization.c
/* Demonstrates a per-iteration local used to transfer a temporary value.
 * The scalar `tmp` introduces a "fake" data dependence: it is written and
 * read in every iteration, but its value never crosses iterations, so it is
 * privatizable and the loop remains parallel.
 */
#include "omp.h"

int a[100];
int b[100];

/* b[i] = a[i] + i for every i, via the privatizable scalar `tmp`. */
void foo()
{
  int i;
#pragma omp parallel for private (i)
  for (i = 0; i < 100; ++i) {
    int tmp = a[i] + i;  /* loop-local scratch value */
    b[i] = tmp;
  }
}
/*
 *-------------Dump the dependence graph for the first loop in a function body!------------
// Output dependence
// Loop-carried ,why CarryLevel =1????
dep SgExprStatement:tmp =((a[i]) + i); SgExprStatement:tmp =((a[i]) + i); 1*1 SCALAR_DEP; commonlevel = 1 CarryLevel = 1
SgVarRefExp:tmp@11:9->SgVarRefExp:tmp@11:9 == 0;||::

//True dependence for both
//a) loop independent (within an iteration) and
//b) loop carried (across iterations) : This is sure thing if a) holds
dep SgExprStatement:tmp =((a[i]) + i); SgExprStatement:b[i] = tmp; 1*1 SCALAR_DEP; commonlevel = 1 CarryLevel = 1
SgVarRefExp:tmp@11:9->SgVarRefExp:tmp@12:12 == 0;||::

//Anti dependence
//Loop carried(BACK_DEP) scalar
dep SgExprStatement:b[i] = tmp; SgExprStatement:tmp =((a[i]) + i); 1*1 SCALAR_BACK_DEP; commonlevel = 1 CarryLevel = 0
SgVarRefExp:tmp@12:12->SgVarRefExp:tmp@11:9 <= -1;||::
*/
cpu_ctc.h
#pragma once #include <tuple> #include <cmath> #include <limits> #include <algorithm> #include <numeric> #if !defined(CTC_DISABLE_OMP) && !defined(APPLE) #include <omp.h> #endif #include "ctc_helper.h" template<typename ProbT> class CpuCTC { public: // Noncopyable CpuCTC(int alphabet_size, int minibatch, void* workspace, int num_threads, int blank_label, bool simplified = false) : alphabet_size_(alphabet_size), minibatch_(minibatch), num_threads_(num_threads), workspace_(workspace), blank_label_(blank_label), simplified_(simplified) { #if defined(CTC_DISABLE_OMP) || defined(APPLE) #else if (num_threads > 0) { omp_set_num_threads(num_threads); } else { num_threads_ = omp_get_max_threads(); } #endif }; CpuCTC(const CpuCTC&) = delete; CpuCTC& operator=(const CpuCTC&) = delete; ctcStatus_t cost_and_grad(const ProbT* const activations, ProbT *grads, ProbT* costs, const int* const flat_labels, const int* const label_lengths, const int* const input_lengths); ctcStatus_t score_forward(const ProbT* const activations, ProbT* costs, const int* const flat_labels, const int* const label_lengths, const int* const input_lengths); private: class CpuCTC_metadata { private: int setup_labels(const int* const labels, int blank_label, int L, int S, bool simplified); public: CpuCTC_metadata(int L, int S, int T, int mb, int alphabet_size, void* workspace, size_t bytes_used, int blank_label, const int* const labels, bool simplified); ProbT* alphas; ProbT* betas; int* labels_w_blanks; int* e_inc; int* s_inc; ProbT* output; int repeats; }; int alphabet_size_; // Number of characters plus blank int minibatch_; int num_threads_; int blank_label_; bool simplified_; // Whether it's simplified CTC void* workspace_; void softmax(const ProbT* const activations, ProbT* probs, const int* const input_lengths); std::tuple<ProbT, bool> cost_and_grad_kernel(ProbT *grad, const ProbT* const probs, const int* const labels, int T, int L, int mb, size_t bytes_used); ProbT compute_alphas(const ProbT* probs, 
int repeats, int S, int T, const int* const e_inc, const int* const s_inc, const int* const labels, ProbT* alphas); ProbT compute_betas_and_grad(ProbT* grad, const ProbT* const probs, ProbT log_partition, int repeats, int S, int T, const int* const e_inc, const int* const s_inc, const int* const labels, ProbT* alphas, ProbT* betas, ProbT* output); }; template<typename ProbT> CpuCTC<ProbT>::CpuCTC_metadata::CpuCTC_metadata(int L, int S, int T, int mb, int alphabet_size, void* workspace, size_t bytes_used, int blank_label, const int* const labels, bool simplified) { alphas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used); bytes_used += sizeof(ProbT) * S * T; std::fill(alphas, alphas + S * T, ctc_helper::neg_inf<ProbT>()); betas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used); bytes_used += sizeof(ProbT) * S; std::fill(betas, betas + S, ctc_helper::neg_inf<ProbT>()); labels_w_blanks = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used); bytes_used += sizeof(int) * S; e_inc = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used); bytes_used += sizeof(int) * S; s_inc = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used); bytes_used += sizeof(int) * S; output = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used); bytes_used += sizeof(ProbT) * alphabet_size; repeats = setup_labels(labels, blank_label, L, S, simplified); } template<typename ProbT> int CpuCTC<ProbT>::CpuCTC_metadata::setup_labels(const int* const labels, int blank_label, int L, int S, bool simplified) { int e_counter = 0; int s_counter = 0; s_inc[s_counter++] = 1; int repeats = 0; for (int i = 1; i < L; ++i) { if (!simplified && labels[i-1] == labels[i]) { s_inc[s_counter++] = 1; s_inc[s_counter++] = 1; e_inc[e_counter++] = 1; e_inc[e_counter++] = 1; ++repeats; } else { s_inc[s_counter++] = 2; e_inc[e_counter++] = 2; } } e_inc[e_counter++] = 1; for (int i = 0; i < L; ++i) { labels_w_blanks[2 
* i] = blank_label; labels_w_blanks[2 * i + 1] = labels[i]; } labels_w_blanks[S - 1] = blank_label; return repeats; } template<typename ProbT> void CpuCTC<ProbT>::softmax(const ProbT* const activations, ProbT* probs, const int* const input_lengths) { #pragma omp parallel for for (int mb = 0; mb < minibatch_; ++mb) { for(int c = 0; c < input_lengths[mb]; ++c) { int col_offset = (mb + minibatch_ * c) * alphabet_size_; ProbT max_activation = -std::numeric_limits<ProbT>::infinity(); for(int r = 0; r < alphabet_size_; ++r) max_activation = std::max(max_activation, activations[r + col_offset]); ProbT denom = ProbT(0.); for(int r = 0; r < alphabet_size_; ++r) { probs[r + col_offset] = std::exp(activations[r + col_offset] - max_activation); denom += probs[r + col_offset]; } for(int r = 0; r < alphabet_size_; ++r) { probs[r + col_offset] /= denom; } } } } template<typename ProbT> std::tuple<ProbT, bool> CpuCTC<ProbT>::cost_and_grad_kernel(ProbT *grad, const ProbT* const probs, const int* const labels, int T, int L, int mb, size_t bytes_used) { const int S = 2*L + 1; // Number of labels with blanks CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_, bytes_used, blank_label_, labels, simplified_); bool over_threshold = false; if (L + ctcm.repeats > T) { return std::make_tuple(ProbT(0), over_threshold); // TODO, not right to return 0 } ProbT llForward = compute_alphas(probs, ctcm.repeats, S, T, ctcm.e_inc, ctcm.s_inc, ctcm.labels_w_blanks, ctcm.alphas); ProbT llBackward = compute_betas_and_grad(grad, probs, llForward, ctcm.repeats, S, T, ctcm.e_inc, ctcm.s_inc, ctcm.labels_w_blanks, ctcm.alphas, ctcm.betas, ctcm.output); ProbT diff = std::abs(llForward - llBackward); if (diff > ctc_helper::threshold) { over_threshold = true; } return std::make_tuple(-llForward, over_threshold); } // Computes forward probabilities template<typename ProbT> ProbT CpuCTC<ProbT>::compute_alphas(const ProbT* probs, int repeats, int S, int T, const int* const e_inc, const int* const s_inc, 
const int* const labels, ProbT* alphas) { int start = (((S /2) + repeats - T) < 0) ? 0 : 1, end = S > 1 ? 2 : 1; for (int i = start; i < end; ++i) { alphas[i] = std::log(probs[labels[i]]); } for(int t = 1; t < T; ++t) { int remain = (S / 2) + repeats - (T - t); if(remain >= 0) start += s_inc[remain]; if(t <= (S / 2) + repeats) end += e_inc[t - 1]; int startloop = start; int idx1 = t * S, idx2 = (t - 1) * S, idx3 = t * (alphabet_size_ * minibatch_); if (start == 0) { alphas[idx1] = alphas[idx2] + std::log(probs[blank_label_ + idx3]); startloop += 1; } for(int i = startloop; i < end; ++i) { ProbT prev_sum; if (simplified_) { if (labels[i] == blank_label_) { prev_sum = ctc_helper::log_plus<ProbT>()(alphas[i + idx2], alphas[(i-1) + idx2]); } else { prev_sum = alphas[(i-1) + idx2]; if (i > 1) { prev_sum = ctc_helper::log_plus<ProbT>()(prev_sum, alphas[(i-2) + idx2]); } } } else { prev_sum = ctc_helper::log_plus<ProbT>()(alphas[i + idx2], alphas[(i-1) + idx2]); // Skip two if not on blank and not on repeat. if (labels[i] != blank_label_ && i != 1 && labels[i] != labels[i-2]) prev_sum = ctc_helper::log_plus<ProbT>()(prev_sum, alphas[(i-2) + idx2]); } alphas[i + idx1] = prev_sum + std::log(probs[labels[i] + idx3]); } } ProbT loglike = ctc_helper::neg_inf<ProbT>(); for(int i = start; i < end; ++i) { loglike = ctc_helper::log_plus<ProbT>()(loglike, alphas[i + (T - 1) * S]); } return loglike; } // Starting from T, we sweep backward over the alpha array computing one column // of betas as we go. At each position we can update product alpha * beta and then // sum into the gradient associated with each label. // NOTE computes gradient w.r.t UNNORMALIZED final layer activations. // Assumed passed in grads are already zeroed! 
template<typename ProbT> ProbT CpuCTC<ProbT>::compute_betas_and_grad(ProbT* grad, const ProbT* const probs, ProbT log_partition, int repeats, int S, int T, const int* const e_inc, const int* const s_inc, const int* const labels, ProbT* alphas, ProbT* betas, ProbT* output) { int start = S > 1 ? (S - 2) : 0, end = (T > (S / 2) + repeats) ? S : S-1; std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>()); //set the starting values in the beta column at the very right edge for (int i = start; i < end; ++i) { betas[i] = std::log(probs[labels[i] + (T - 1) * (alphabet_size_ * minibatch_)]); //compute alpha * beta in log space at this position in (S, T) space alphas[i + (T - 1) * S] += betas[i]; //update the gradient associated with this label //essentially performing a reduce-by-key in a sequential manner output[labels[i]] = ctc_helper::log_plus<ProbT>()(alphas[i + (T - 1) * S], output[labels[i]]); } //update the gradient wrt to each unique label for (int i = 0; i < alphabet_size_; ++i) { int idx3 = (T - 1) * alphabet_size_ * minibatch_ + i; if (output[i] == 0.0 || output[i] == ctc_helper::neg_inf<ProbT>() || probs[idx3] == 0.0) { grad[idx3] = probs[idx3]; } else { grad[idx3] = probs[idx3] - std::exp(output[i] - std::log(probs[idx3]) - log_partition); } } //loop from the second to last column all the way to the left for(int t = T - 2; t >= 0; --t) { int remain = (S / 2) + repeats - (T - t); if(remain >= -1) start -= s_inc[remain + 1]; if(t < (S / 2) + repeats) end -= e_inc[t]; int endloop = end == S ? 
end - 1 : end; int idx1 = t * S, idx3 = t * (alphabet_size_ * minibatch_); std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>()); for(int i = start; i < endloop; ++i) { ProbT next_sum; if (simplified_) { if (labels[i] == blank_label_) { next_sum = ctc_helper::log_plus<ProbT>()(betas[i], betas[(i+1)]); } else { next_sum = betas[(i+1)]; if (i < S - 2) { next_sum = ctc_helper::log_plus<ProbT>()(next_sum, betas[(i+2)]); } } } else { next_sum = ctc_helper::log_plus<ProbT>()(betas[i], betas[(i+1)]); // Skip two if not on blank and not on repeat. if (labels[i] != blank_label_ && i != (S-2) && labels[i] != labels[i+2]){ next_sum = ctc_helper::log_plus<ProbT>()(next_sum, betas[(i+2)]); } } betas[i] = next_sum + std::log(probs[labels[i] + idx3]); //compute alpha * beta in log space alphas[i + idx1] += betas[i]; //update the gradient associated with this label output[labels[i]] = ctc_helper::log_plus<ProbT>()(alphas[i + idx1], output[labels[i]]); } if (end == S) { betas[(S-1)] = betas[(S-1)] + std::log(probs[blank_label_ + idx3]); alphas[(S-1) + idx1] += betas[(S-1)]; output[labels[S-1]] = ctc_helper::log_plus<ProbT>()(alphas[S-1 + idx1], output[labels[S-1]]); } //go over the unique labels and compute the final grad // wrt to each one at this time step for (int i = 0; i < alphabet_size_; ++i) { if (output[i] == 0.0 || output[i] == ctc_helper::neg_inf<ProbT>() || probs[idx3] == 0.0) { grad[idx3] = probs[idx3]; } else { grad[idx3] = probs[idx3] - std::exp(output[i] - std::log(probs[idx3]) - log_partition); } ++idx3; } } ProbT loglike = ctc_helper::neg_inf<ProbT>(); for(int i = start; i < end; ++i) { loglike = ctc_helper::log_plus<ProbT>()(loglike, betas[i]); } return loglike; } template<typename ProbT> ctcStatus_t CpuCTC<ProbT>::cost_and_grad(const ProbT* const activations, ProbT *grads, ProbT *costs, const int* const flat_labels, const int* const label_lengths, const int* const input_lengths) { if (activations == nullptr || grads == nullptr || costs == 
nullptr || flat_labels == nullptr || label_lengths == nullptr || input_lengths == nullptr ) return CTC_STATUS_INVALID_VALUE; ProbT* probs = static_cast<ProbT *>(workspace_); int maxT = *std::max_element(input_lengths, input_lengths + minibatch_); size_t bytes_used = sizeof(ProbT) * minibatch_ * alphabet_size_ * maxT; //per minibatch memory size_t per_minibatch_bytes = 0; int maxL = *std::max_element(label_lengths, label_lengths + minibatch_);; int maxS = 2 * maxL + 1; //output per_minibatch_bytes += sizeof(float) * alphabet_size_; //alphas per_minibatch_bytes += sizeof(float) * maxS * maxT; //betas per_minibatch_bytes += sizeof(float) * maxS; //labels w/blanks, e_inc, s_inc per_minibatch_bytes += 3 * sizeof(int) * maxS; softmax(activations, probs, input_lengths); #pragma omp parallel for for (int mb = 0; mb < minibatch_; ++mb) { const int T = input_lengths[mb]; // Length of utterance (time) const int L = label_lengths[mb]; // Number of labels in transcription bool mb_status; std::tie(costs[mb], mb_status) = cost_and_grad_kernel(grads + mb * alphabet_size_, probs + mb * alphabet_size_, flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0), T, L, mb, bytes_used + mb * per_minibatch_bytes); } return CTC_STATUS_SUCCESS; } template<typename ProbT> ctcStatus_t CpuCTC<ProbT>::score_forward(const ProbT* const activations, ProbT* costs, const int* const flat_labels, const int* const label_lengths, const int* const input_lengths) { if (activations == nullptr || costs == nullptr || flat_labels == nullptr || label_lengths == nullptr || input_lengths == nullptr ) return CTC_STATUS_INVALID_VALUE; ProbT* probs = static_cast<ProbT *>(workspace_); int maxT = *std::max_element(input_lengths, input_lengths + minibatch_); size_t bytes_used = sizeof(ProbT) * minibatch_ * alphabet_size_ * maxT; //per minibatch memory size_t per_minibatch_bytes = 0; int maxL = *std::max_element(label_lengths, label_lengths + minibatch_); int maxS = 2 * maxL + 1; //output per_minibatch_bytes 
+= sizeof(float) * alphabet_size_; //alphas per_minibatch_bytes += sizeof(float) * maxS * maxT; //betas per_minibatch_bytes += sizeof(float) * maxS; //labels w/blanks, e_inc, s_inc per_minibatch_bytes += 3 * sizeof(int) * maxS; softmax(activations, probs, input_lengths); #pragma omp parallel for for (int mb = 0; mb < minibatch_; ++mb) { const int T = input_lengths[mb]; // Length of utterance (time) const int L = label_lengths[mb]; // Number of labels in transcription const int S = 2*L + 1; // Number of labels with blanks CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_, bytes_used + mb * per_minibatch_bytes, blank_label_, flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0), simplified_); if (L + ctcm.repeats > T) costs[mb] = ProbT(0); else { costs[mb] = -compute_alphas(probs + mb * alphabet_size_, ctcm.repeats, S, T, ctcm.e_inc, ctcm.s_inc, ctcm.labels_w_blanks, ctcm.alphas); } } return CTC_STATUS_SUCCESS; }
app_baseline.c
/** * @file app.c * @brief Template for a Host Application Source File. * */ #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <string.h> #include <unistd.h> #include <getopt.h> #include <assert.h> #include <stdint.h> #include <omp.h> #include "../../support/timer.h" static uint64_t *A; static uint64_t *B; static uint64_t *C; static uint64_t *C2; static int pos; bool pred(const uint64_t x){ return (x % 2) == 0; } void *create_test_file(unsigned int nr_elements) { //srand(0); A = (uint64_t*) malloc(nr_elements * sizeof(uint64_t)); B = (uint64_t*) malloc(nr_elements * sizeof(uint64_t)); C = (uint64_t*) malloc(nr_elements * sizeof(uint64_t)); printf("nr_elements\t%u\t", nr_elements); for (int i = 0; i < nr_elements; i++) { //A[i] = (unsigned int) (rand()); A[i] = i+1; B[i] = 0; } } /** * @brief compute output in the host */ static int select_host(int size, int t) { pos = 0; C[pos] = A[pos]; omp_set_num_threads(t); #pragma omp parallel for for(int my = 1; my < size; my++) { if(!pred(A[my])) { int p; #pragma omp atomic update pos++; p = pos; C[p] = A[my]; } } return pos; } // Params --------------------------------------------------------------------- typedef struct Params { char* dpu_type; int input_size; int n_warmup; int n_reps; int n_threads; }Params; void usage() { fprintf(stderr, "\nUsage: ./program [options]" "\n" "\nGeneral options:" "\n -h help" "\n -d <D> DPU type (default=fsim)" "\n -t <T> # of threads (default=8)" "\n -w <W> # of untimed warmup iterations (default=2)" "\n -e <E> # of timed repetition iterations (default=5)" "\n" "\nBenchmark-specific options:" "\n -i <I> input size (default=8M elements)" "\n"); } struct Params input_params(int argc, char **argv) { struct Params p; p.input_size = 16 << 20; p.n_warmup = 1; p.n_reps = 3; p.n_threads = 5; int opt; while((opt = getopt(argc, argv, "hi:w:e:t:")) >= 0) { switch(opt) { case 'h': usage(); exit(0); break; case 'i': p.input_size = atoi(optarg); break; case 'w': p.n_warmup = 
atoi(optarg); break; case 'e': p.n_reps = atoi(optarg); break; case 't': p.n_threads = atoi(optarg); break; default: fprintf(stderr, "\nUnrecognized option!\n"); usage(); exit(0); } } assert(p.n_threads > 0 && "Invalid # of ranks!"); return p; } /** * @brief Main of the Host Application. */ int main(int argc, char **argv) { struct Params p = input_params(argc, argv); const unsigned int file_size = p.input_size; uint32_t accum = 0; int total_count; // Create an input file with arbitrary data. create_test_file(file_size); Timer timer; start(&timer, 0, 0); total_count = select_host(file_size, p.n_threads); stop(&timer, 0); printf("Total count = %d\t", total_count); printf("Kernel "); print(&timer, 0, 1); printf("\n"); free(A); free(B); free(C); return 0; }
arrayInStruct_openMP.c
#include "arrayInStruct_openMP.h"

/* Starting from a copy of *v0, adds 5 to every element once per remaining
 * iteration (initially (*v0).length iterations), ping-ponging between two
 * buffers, then copies the final buffer into *out and frees the scratch.
 * member1 is the loop counter, member2 the working array. */
void arrayInStruct__openMP(struct awl_unsignedS32 * v0, struct awl_unsignedS32 * out)
{
    struct s_2_1xunsignedS32_1xawl_unsignedS32 cur = { 0 };   /* current (counter, array) state */
    struct s_2_1xunsignedS32_1xawl_unsignedS32 spare = { 0 }; /* ping-pong scratch state */
    bool keep_going;

    /* Seed the state: counter = input length, array = copy of the input. */
    cur.member1 = (*v0).length;
    cur.member2.buffer = initCopyArray(cur.member2.buffer, cur.member2.length,
                                       sizeof(uint32_t), (*v0).buffer, (*v0).length);
    cur.member2.length = (*v0).length;

    keep_going = (cur.member1 > 0);
    while (keep_going) {
        uint32_t n;
        struct awl_unsignedS32 swap_tmp = { 0 };

        spare.member1 = cur.member1 - 1;
        n = cur.member2.length;
        spare.member2.buffer = initArray(spare.member2.buffer, spare.member2.length,
                                         sizeof(uint32_t), n);
        spare.member2.length = n;

#pragma omp parallel for
        for (uint32_t i = 0; i < n; i += 1) {
            spare.member2.buffer[i] = cur.member2.buffer[i] + 5;
        }

        /* Promote the freshly written state and recycle the old buffer. */
        swap_tmp = cur.member2;
        cur = spare;
        spare.member2 = swap_tmp;

        keep_going = (cur.member1 > 0);
    }

    (*out).buffer = initCopyArray((*out).buffer, (*out).length, sizeof(uint32_t),
                                  cur.member2.buffer, cur.member2.length);
    (*out).length = cur.member2.length;
    freeArray(cur.member2.buffer);
    freeArray(spare.member2.buffer);
}
Triangular_BCSC.h
// // Created by kazem on 7/17/17. // #ifndef TRIANGOPENMP_TRIANGULAR_H #define TRIANGOPENMP_TRIANGULAR_H #include <cassert> #include "BLAS.h" #include "mkl.h" namespace nasoq { #define MKL_BLAS /* * Forward solve blocked */ int blockedLsolve(int n, size_t *Lp, int *Li, double *Lx, int NNZ, size_t *Li_ptr, int *col2sup, int *sup2col, int supNo, double *x) { int p, j; double one[2], zero[2]; one[0] = 1.0; /* ALPHA for *syrk, *herk, *gemm, and *trsm */ one[1] = 0.; zero[0] = 0.; /* BETA for *syrk, *herk, and *gemm */ zero[1] = 0.; int ione = 1; double *tempVec = new double[n](); if (!Lp || !Li || !x) return (0); /* check inputs */ for (int i = 1; i <= supNo; ++i) {// for each supernode int curCol = i != 0 ? sup2col[i - 1] : 0; int nxtCol = sup2col[i]; int supWdt = nxtCol - curCol; int nSupR = Li_ptr[nxtCol] - Li_ptr[curCol];//row size of supernode double *Ltrng = &Lx[Lp[curCol]];//first nnz of current supernode //lSolve_dense(nSupR,supWdt,Ltrng,&x[curCol]); dlsolve_blas_nonUnit(nSupR, supWdt, Ltrng, &x[curCol]); Ltrng = &Lx[Lp[curCol] + supWdt];//first nnz of below diagonal //matVector(nSupR,nSupR-supWdt,supWdt,Ltrng,&x[curCol],tempVec); dmatvec_blas(nSupR, nSupR - supWdt, supWdt, Ltrng, &x[curCol], tempVec); for (int l = Li_ptr[curCol] + supWdt, k = 0; l < Li_ptr[nxtCol]; ++l, ++k) { x[Li[l]] -= tempVec[k]; tempVec[k] = 0; } #if 0 for (int k = 0; k < 200; ++k) { std::cout<<","<<x[k]; } std::cout<<"\n"; #endif } delete[]tempVec; return (1); } /* * Backward solve blocked, unit triangular */ int blockedLTsolve(int n, size_t *Lp, int *Li, double *Lx, int NNZ, size_t *Li_ptr, int *col2sup, int *sup2col, int supNo, double *x) { int p, j; double one[2], zero[2]; one[0] = 1.0; one[1] = 0.; zero[0] = 0.; zero[1] = 0.; double minus_one = -1; int ione = 1; double *tempVec = new double[n](); if (!Lp || !Li || !x) return (0); /* check inputs */ for (int i = supNo; i > 0; --i) {// for each supernode int curCol = i != 0 ? 
sup2col[i - 1] : 0; int nxtCol = sup2col[i]; int supWdt = nxtCol - curCol; int nSupR = Li_ptr[nxtCol] - Li_ptr[curCol];//row size of supernode double *Ltrng = &Lx[Lp[curCol] + supWdt];//first nnz of below diagonal for (int l = 0; l < nSupR - supWdt; ++l) { tempVec[l] = x[Li[Li_ptr[curCol] + supWdt + l]]; } #ifdef BLAS1 //FIXME dmatvec_blas(nSupR,nSupR-supWdt,supWdt,Ltrng,&x[curCol],tempVec); #endif #ifdef MKL_BLAS int tmpRow = nSupR - supWdt; dgemv("T", &tmpRow, &supWdt, &minus_one, Ltrng, &nSupR, tempVec, &ione, one, &x[curCol], &ione); #endif Ltrng = &Lx[Lp[curCol]];//first nnz of current supernode #ifdef BLAS1//FIXME dlsolve_blas_nonUnit(nSupR,supWdt,Ltrng,&x[curCol]);//FIXME make it for transpose #endif #ifdef MKL_BLAS dtrsm("L", "L", "T", "N", &supWdt, &ione, one, Ltrng, &nSupR, &x[curCol], &n); #endif #if 0 for (int k = 0; k < 200; ++k) { std::cout<<","<<x[k]; } std::cout<<"\n"; #endif } delete[]tempVec; return (1); } /* * */ int LeveledBlockedLTsolve_update(int n, size_t *Lp, int *Li, double *Lx, int NNZ, size_t *Li_ptr, int *col2sup, int *sup2col, int supNo, double *x, int levels, int *levelPtr, int *levelSet, int chunk, bool *marked, double *ws_dbl = NULL) { //int chunk = 70; double one[2], zero[2]; one[0] = 1.0; /* ALPHA for *syrk, *herk, *gemm, and *trsm */ one[1] = 0.; zero[0] = 0.; /* BETA for *syrk, *herk, and *gemm */ zero[1] = 0.; int ione = 1; double minus_one = -1; //double *tempVec = new double[n](); if (!Lp || !Li || !x) return (0); /* check inputs */ for (int i1 = levels - 1; i1 >= 0; --i1) { int li = 0; #pragma omp parallel private(li) { //tempVec = new double[n](); double *tempVec; if (ws_dbl == NULL) { tempVec = (double *) calloc(n, sizeof(double)); } else {//FIXME: the else part is not right //std::cout<<"-> "<<omp_get_thread_num()<<"\n"; tempVec = ws_dbl + omp_get_thread_num() * n; } #pragma omp for \ schedule(static) for (li = levelPtr[i1]; li < levelPtr[i1 + 1]; ++li) { int i = levelSet[li]; if (!marked[i]) continue; int curCol = 
sup2col[i]; int nxtCol = sup2col[i + 1]; int supWdt = nxtCol - curCol; int nSupR = Li_ptr[nxtCol] - Li_ptr[curCol];//row size of supernode double *Ltrng = &Lx[Lp[curCol] + supWdt];//first nnz of below diagonal for (int l = 0; l < nSupR - supWdt; ++l) { tempVec[l] = x[Li[Li_ptr[curCol] + supWdt + l]]; } #ifdef BLAS1 //FIXME dmatvec_blas(nSupR,nSupR-supWdt,supWdt,Ltrng,&x[curCol],tempVec); #endif #ifdef MKL_BLAS int tmpRow = nSupR - supWdt; dgemv("T", &tmpRow, &supWdt, &minus_one, Ltrng, &nSupR, tempVec, &ione, one, &x[curCol], &ione); #endif Ltrng = &Lx[Lp[curCol]];//first nnz of current supernode #ifdef BLAS1//FIXME dlsolve_blas_nonUnit(nSupR,supWdt,Ltrng,&x[curCol]);//FIXME make it for transpose #endif #ifdef MKL_BLAS dtrsm("L", "L", "T", "N", &supWdt, &ione, one, Ltrng, &nSupR, &x[curCol], &n); #endif for (int l = 0; l < nSupR - supWdt; ++l) { tempVec[l] = 0; } } if (ws_dbl == NULL) { free(tempVec); } } } return (1); } /* * Blocked Serial code */ //#define BLAS2 int blockedPrunedLSolve(int n, int *Lp, int *Li, double *Lx, int NNZ, int *Li_ptr, int *BPSet, int PBSetSize, int *sup2col, int supNo, double *x) { int p, j, i; double tmp = 0; double one[2], zero[2]; one[0] = 1.0; /* ALPHA for *syrk, *herk, *gemm, and *trsm */ one[1] = 0.; zero[0] = 0.; /* BETA for *syrk, *herk, and *gemm */ zero[1] = 0.; int ione = 1; double *tempVec = new double[n](); if (!Lp || !Li || !x) return (0); /* check inputs */ // for (int i = 2530; i < supNo; ++i) {// for each supernode for (int ps = 0; ps < PBSetSize; ++ps) {// for each supernode i = BPSet[ps]; int curCol = i != 0 ? 
sup2col[i - 1] : 0; int nxtCol = sup2col[i]; int supWdt = nxtCol - curCol; int nSupR = Li_ptr[nxtCol] - Li_ptr[curCol];//row size of supernode double *Ltrng = &Lx[Lp[curCol]];//first nnz of current supernode //lSolve_dense(nSupR,supWdt,Ltrng,&x[curCol]); #ifdef BLAS1 dlsolve_blas_nonUnit(nSupR,supWdt,Ltrng,&x[curCol]); #endif #ifdef BLAS2 dtrsm("L", "L", "N", "N", &supWdt,&ione,one,Ltrng, &nSupR,&x[curCol],&n); #endif Ltrng = &Lx[Lp[curCol] + supWdt];//first nnz of below diagonal //matVector(nSupR,nSupR-supWdt,supWdt,Ltrng,&x[curCol],tempVec); #ifdef BLAS1 dmatvec_blas(nSupR,nSupR-supWdt,supWdt,Ltrng,&x[curCol],tempVec); #endif #ifdef BLAS2 int tmpRow=nSupR - supWdt; dgemv("N",&tmpRow,&supWdt,one,Ltrng,&nSupR,&x[curCol],&ione, zero,tempVec,&ione); #endif for (int l = Li_ptr[curCol] + supWdt, k = 0; l < Li_ptr[nxtCol]; ++l, ++k) { x[Li[l]] -= tempVec[k]; tempVec[k] = 0; } #if 0 for (int k = 0; k < 200; ++k) { std::cout<<","<<x[k]; } std::cout<<"\n"; #endif } delete[]tempVec; return (1); } /* * Parallel Blocked */ int leveledBlockedLsolve(int n, size_t *Lp, int *Li, double *Lx, int NNZ, size_t *Li_ptr, int *col2sup, int *sup2col, int supNo, double *x, int levels, int *levelPtr, int *levelSet, int chunk) { //int chunk = 70; double one[2], zero[2]; one[0] = 1.0; /* ALPHA for *syrk, *herk, *gemm, and *trsm */ one[1] = 0.; zero[0] = 0.; /* BETA for *syrk, *herk, and *gemm */ zero[1] = 0.; int ione = 1; //double *tempVec = new double[n](); double *tempVec; if (!Lp || !Li || !x) return (0); /* check inputs */ for (int l = 0; l < levels; ++l) { int li = 0; #pragma omp parallel private(li, tempVec) { //tempVec = new double[n](); tempVec = (double *) calloc(n, sizeof(double)); #pragma omp for \ schedule(static) for (li = levelPtr[l]; li < levelPtr[l + 1]; ++li) { int i = levelSet[li]; int curCol = sup2col[i]; int nxtCol = sup2col[i + 1]; int supWdt = nxtCol - curCol; assert(supWdt > 0); int nSupR = Li_ptr[nxtCol] - Li_ptr[curCol];//row size of supernode double *Ltrng = 
&Lx[Lp[curCol]];//first nnz of current supernode //lSolve_dense_col_sync(nSupR,supWdt,Ltrng,&x[curCol]); dlsolve_blas_nonUnit(nSupR, supWdt, Ltrng, &x[curCol]); Ltrng = &Lx[Lp[curCol] + supWdt];//first nnz of below diagonal //matVector(nSupR,nSupR-supWdt,supWdt,Ltrng,&x[curCol],tempVec); dmatvec_blas(nSupR, nSupR - supWdt, supWdt, Ltrng, &x[curCol], tempVec); // #pragma omp critical for (int l = Li_ptr[curCol] + supWdt, k = 0; l < Li_ptr[nxtCol]; ++l, ++k) { #pragma omp atomic x[Li[l]] -= tempVec[k]; tempVec[k] = 0; } } free(tempVec); } } return (1); } /* * Parallel Blocked with masked cols */ int leveledBlockedLsolve_update(int n, size_t *Lp, int *Li, double *Lx, int NNZ, size_t *Li_ptr, int *col2sup, int *sup2col, int supNo, double *x, int levels, int *levelPtr, int *levelSet, int chunk, bool *mask, double *ws_dbl = NULL) { //int chunk = 70; double one[2], zero[2]; one[0] = 1.0; /* ALPHA for *syrk, *herk, *gemm, and *trsm */ one[1] = 0.; zero[0] = 0.; /* BETA for *syrk, *herk, and *gemm */ zero[1] = 0.; int ione = 1; //double *tempVec = new double[n](); if (!Lp || !Li || !x) return (0); /* check inputs */ for (int l = 0; l < levels; ++l) { int li = 0; #pragma omp parallel private(li) { double *tempVec; if (ws_dbl == NULL) { tempVec = (double *) calloc(n, sizeof(double)); } else {//FIXME: the else part is not right //std::cout<<"-> "<<omp_get_thread_num()<<"\n"; tempVec = ws_dbl + omp_get_thread_num() * n; } #pragma omp for \ schedule(static) for (li = levelPtr[l]; li < levelPtr[l + 1]; ++li) { int i = levelSet[li]; if (!mask[i]) continue; int curCol = sup2col[i]; int nxtCol = sup2col[i + 1]; int supWdt = nxtCol - curCol; assert(supWdt > 0); int nSupR = Li_ptr[nxtCol] - Li_ptr[curCol];//row size of supernode double *Ltrng = &Lx[Lp[curCol]];//first nnz of current supernode //lSolve_dense_col_sync(nSupR,supWdt,Ltrng,&x[curCol]); dlsolve_blas_nonUnit(nSupR, supWdt, Ltrng, &x[curCol]); Ltrng = &Lx[Lp[curCol] + supWdt];//first nnz of below diagonal 
//matVector(nSupR,nSupR-supWdt,supWdt,Ltrng,&x[curCol],tempVec); dmatvec_blas(nSupR, nSupR - supWdt, supWdt, Ltrng, &x[curCol], tempVec); // #pragma omp critical for (int l = Li_ptr[curCol] + supWdt, k = 0; l < Li_ptr[nxtCol]; ++l, ++k) { #pragma omp atomic x[Li[l]] -= tempVec[k]; tempVec[k] = 0; } } free(tempVec); } } return (1); } /* * Parallel H2 Blocked */ //#define MKL_BLAS int H2LeveledBlockedLsolve(int n, size_t *Lp, int *Li, double *Lx, int NNZ, size_t *Li_ptr, int *col2sup, int *sup2col, int supNo, double *x, int levels, int *levelPtr, int *levelSet, int parts, int *parPtr, int *partition, int chunk) { //int chunk = 70; double one[2], zero[2]; one[0] = 1.0; /* ALPHA for *syrk, *herk, *gemm, and *trsm */ one[1] = 0.; zero[0] = 0.; /* BETA for *syrk, *herk, and *gemm */ zero[1] = 0.; int ione = 1; //double *tempVec = new double[n](); double *tempVec; if (!Lp || !Li || !x) return (0); /* check inputs */ for (int i1 = 0; i1 < levels; ++i1) { int j1 = 0; #pragma omp parallel //shared(lValues)//private(map, contribs) { #pragma omp for schedule(static) private(j1, tempVec) for (j1 = levelPtr[i1]; j1 < levelPtr[i1 + 1]; ++j1) { //tempVec = new double[n](); tempVec = (double *) calloc(n, sizeof(double)); for (int k1 = parPtr[j1]; k1 < parPtr[j1 + 1]; ++k1) { int i = partition[k1]; int curCol = sup2col[i]; int nxtCol = sup2col[i + 1]; int supWdt = nxtCol - curCol; int nSupR = Li_ptr[nxtCol] - Li_ptr[curCol];//row size of supernode double *Ltrng = &Lx[Lp[curCol]];//first nnz of current supernode //lSolve_dense(nSupR,supWdt,Ltrng,&x[curCol]); #ifdef MKL_BLAS dtrsm("L", "L", "N", "N", &supWdt, &ione, one, Ltrng, &nSupR, &x[curCol], &n); #else dlsolve_blas_nonUnit(nSupR, supWdt, Ltrng, &x[curCol]); #endif Ltrng = &Lx[Lp[curCol] + supWdt];//first nnz of below diagonal //matVector(nSupR,nSupR-supWdt,supWdt,Ltrng,&x[curCol],tempVec); #ifdef MKL_BLAS int tmpRow = nSupR - supWdt; dgemv("N", &tmpRow, &supWdt, one, Ltrng, &nSupR, &x[curCol], &ione, zero, tempVec, &ione); #else 
dmatvec_blas(nSupR, nSupR - supWdt, supWdt, Ltrng, &x[curCol], tempVec); #endif // #pragma omp critical //FIXME: I don't thnink we need this for (int l = Li_ptr[curCol] + supWdt, k = 0; l < Li_ptr[nxtCol]; ++l, ++k) { #pragma omp atomic x[Li[l]] -= tempVec[k]; tempVec[k] = 0; } } free(tempVec); } } } return (1); } int H2LeveledBlockedLsolve_update(int n, size_t *Lp, int *Li, double *Lx, int NNZ, size_t *Li_ptr, int *col2sup, int *sup2col, int supNo, double *x, int levels, int *levelPtr, int *levelSet, int parts, int *parPtr, int *partition, int chunk, bool *mask, double *ws_dbl = NULL) { //int chunk = 70; double one[2], zero[2]; one[0] = 1.0; /* ALPHA for *syrk, *herk, *gemm, and *trsm */ one[1] = 0.; zero[0] = 0.; /* BETA for *syrk, *herk, and *gemm */ zero[1] = 0.; int ione = 1; //double *tempVec = new double[n](); //double *tempVec; if (!Lp || !Li || !x) return (0); /* check inputs */ for (int i1 = 0; i1 < levels; ++i1) { int j1 = 0; #pragma omp parallel //shared(lValues)//private(map, contribs) { #pragma omp for schedule(static) private(j1) for (j1 = levelPtr[i1]; j1 < levelPtr[i1 + 1]; ++j1) { //tempVec = new double[n](); double *tempVec; if (ws_dbl == NULL) { tempVec = (double *) calloc(n, sizeof(double)); } else {//FIXME: the else part is not right //std::cout<<"-> "<<omp_get_thread_num()<<"\n"; tempVec = ws_dbl + omp_get_thread_num() * n; } for (int k1 = parPtr[j1]; k1 < parPtr[j1 + 1]; ++k1) { int i = partition[k1]; if (!mask[i]) continue; int curCol = sup2col[i]; int nxtCol = sup2col[i + 1]; int supWdt = nxtCol - curCol; int nSupR = Li_ptr[nxtCol] - Li_ptr[curCol];//row size of supernode double *Ltrng = &Lx[Lp[curCol]];//first nnz of current supernode //lSolve_dense(nSupR,supWdt,Ltrng,&x[curCol]); #ifdef MKL_BLAS dtrsm("L", "L", "N", "N", &supWdt, &ione, one, Ltrng, &nSupR, &x[curCol], &n); #else dlsolve_blas_nonUnit(nSupR, supWdt, Ltrng, &x[curCol]); #endif Ltrng = &Lx[Lp[curCol] + supWdt];//first nnz of below diagonal 
     //matVector(nSupR,nSupR-supWdt,supWdt,Ltrng,&x[curCol],tempVec);
#ifdef MKL_BLAS
     int tmpRow = nSupR - supWdt;
     dgemv("N", &tmpRow, &supWdt, one, Ltrng, &nSupR, &x[curCol], &ione,
           zero, tempVec, &ione);
#else
     dmatvec_blas(nSupR, nSupR - supWdt, supWdt, Ltrng, &x[curCol], tempVec);
#endif
     // #pragma omp critical
     for (int l = Li_ptr[curCol] + supWdt, k = 0; l < Li_ptr[nxtCol];
          ++l, ++k) {
#pragma omp atomic
      x[Li[l]] -= tempVec[k];
      tempVec[k] = 0;
     }
    }
    if (ws_dbl == NULL) {
     free(tempVec); // workspace was calloc'ed above, not borrowed from ws_dbl
    }
   }
  }
 }
 return (1);
}

/*
 * Backward solve blocked, unit triangular
 *
 * Transpose solve L^T x = b over the same H2 schedule, traversed in
 * reverse (last level first, partitions walked backwards).
 * NOTE(review): with MKL_BLAS commented out and BLAS1 undefined, both the
 * dgemv and dtrsm calls below preprocess away and this routine performs no
 * arithmetic — confirm that one of the BLAS paths is enabled in the build.
 */
//#define MKL_BLAS
int H2LeveledBlockedLTsolve(int n, size_t *Lp, int *Li, double *Lx, int NNZ,
                            size_t *Li_ptr, int *col2sup, int *sup2col,
                            int supNo, double *x, int levels, int *levelPtr,
                            int *levelSet, int parts, int *parPtr,
                            int *partition, int chunk) {
 //int chunk = 70;
 double one[2], zero[2];
 one[0] = 1.0; /* ALPHA for *syrk, *herk, *gemm, and *trsm */
 one[1] = 0.;
 zero[0] = 0.; /* BETA for *syrk, *herk, and *gemm */
 zero[1] = 0.;
 int ione = 1;
 double minus_one = -1;
 //double *tempVec = new double[n]();
 double *tempVec;
 if (!Lp || !Li || !x) return (0); /* check inputs */
 for (int i1 = levels - 1; i1 >= 0; --i1) { // reverse level order
  int j1 = 0;
#pragma omp parallel //shared(lValues)//private(map, contribs)
  {
#pragma omp for schedule(static) private(j1, tempVec)
   for (j1 = levelPtr[i1]; j1 < levelPtr[i1 + 1]; ++j1) {
    //tempVec = new double[n]();
    tempVec = (double *) calloc(n, sizeof(double));
    for (int k1 = parPtr[j1 + 1] - 1; k1 >= parPtr[j1]; --k1) {
     int i = partition[k1];
     int curCol = sup2col[i];
     int nxtCol = sup2col[i + 1];
     int supWdt = nxtCol - curCol;
     int nSupR = Li_ptr[nxtCol] - Li_ptr[curCol];//row size of supernode
     double *Ltrng = &Lx[Lp[curCol] + supWdt];//first nnz of below diagonal
     // gather the x entries of the rows below the diagonal block
     for (int l = 0; l < nSupR - supWdt; ++l) {
      tempVec[l] = x[Li[Li_ptr[curCol] + supWdt + l]];
     }
#ifdef BLAS1 //FIXME
     dmatvec_blas(nSupR,nSupR-supWdt,supWdt,Ltrng,&x[curCol],tempVec);
#endif
#ifdef MKL_BLAS
     // x[curCol..] -= (sub-diagonal block)^T * gathered entries
     int tmpRow = nSupR - supWdt;
     dgemv("T", &tmpRow, &supWdt, &minus_one, Ltrng, &nSupR, tempVec, &ione,
           one, &x[curCol], &ione);
#endif
     Ltrng = &Lx[Lp[curCol]];//first nnz of current supernode
#ifdef BLAS1//FIXME
     dlsolve_blas_nonUnit(nSupR,supWdt,Ltrng,&x[curCol]);//FIXME make it for transpose
#endif
#ifdef MKL_BLAS
     // transpose triangular solve with the diagonal block
     dtrsm("L", "L", "T", "N", &supWdt, &ione, one, Ltrng, &nSupR,
           &x[curCol], &n);
#endif
    }
    free(tempVec);
   }
  }
 }
 return (1);
}

/*
 * Masked/incremental variant of H2LeveledBlockedLTsolve; see
 * H2LeveledBlockedLsolve_update for the mask / ws_dbl conventions.
 */
int H2LeveledBlockedLTsolve_update(int n, size_t *Lp, int *Li, double *Lx,
                                   int NNZ, size_t *Li_ptr, int *col2sup,
                                   int *sup2col, int supNo, double *x,
                                   int levels, int *levelPtr, int *levelSet,
                                   int parts, int *parPtr, int *partition,
                                   int chunk, bool *mask,
                                   double *ws_dbl = NULL) {
 //int chunk = 70;
 double one[2], zero[2];
 one[0] = 1.0; /* ALPHA for *syrk, *herk, *gemm, and *trsm */
 one[1] = 0.;
 zero[0] = 0.; /* BETA for *syrk, *herk, and *gemm */
 zero[1] = 0.;
 int ione = 1;
 double minus_one = -1;
 //double *tempVec = new double[n]();
 //double *tempVec;
 if (!Lp || !Li || !x) return (0); /* check inputs */
 for (int i1 = levels - 1; i1 >= 0; --i1) {
  int j1 = 0;
#pragma omp parallel //shared(lValues)//private(map, contribs)
  {
#pragma omp for schedule(static) private(j1)
   for (j1 = levelPtr[i1]; j1 < levelPtr[i1 + 1]; ++j1) {
    //tempVec = new double[n]();
    //tempVec = (double *) calloc(n , sizeof(double));
    double *tempVec;
    if (ws_dbl == NULL) {
     tempVec = (double *) calloc(n, sizeof(double));
    } else {//FIXME: the else part is not right
     //std::cout<<"-> "<<omp_get_thread_num()<<"\n";
     tempVec = ws_dbl + omp_get_thread_num() * n;
    }
    for (int k1 = parPtr[j1 + 1] - 1; k1 >= parPtr[j1]; --k1) {
     int i = partition[k1];
     if (!mask[i]) {
      continue; // supernode not touched by the update
     }
     int curCol = sup2col[i];
     int nxtCol = sup2col[i + 1];
     int supWdt = nxtCol - curCol;
     int nSupR = Li_ptr[nxtCol] - Li_ptr[curCol];//row size of supernode
     double *Ltrng = &Lx[Lp[curCol] + supWdt];//first nnz of below diagonal
     for (int l = 0; l < nSupR - supWdt; ++l) {
      tempVec[l] = x[Li[Li_ptr[curCol] + supWdt + l]];
     }
#ifdef BLAS1 //FIXME
     dmatvec_blas(nSupR,nSupR-supWdt,supWdt,Ltrng,&x[curCol],tempVec);
#endif
#ifdef MKL_BLAS
     int tmpRow = nSupR - supWdt;
     dgemv("T", &tmpRow, &supWdt, &minus_one, Ltrng, &nSupR, tempVec, &ione,
           one, &x[curCol], &ione);
#endif
     Ltrng = &Lx[Lp[curCol]];//first nnz of current supernode
#ifdef BLAS1//FIXME
     dlsolve_blas_nonUnit(nSupR,supWdt,Ltrng,&x[curCol]);//FIXME make it for transpose
#endif
#ifdef MKL_BLAS
     dtrsm("L", "L", "T", "N", &supWdt, &ione, one, Ltrng, &nSupR,
           &x[curCol], &n);
#endif
     // re-zero only the workspace prefix that this supernode used
     for (int l = 0; l < nSupR - supWdt; ++l) {
      tempVec[l] = 0;
     }
    }
    if (ws_dbl == NULL) {
     free(tempVec);
    }
   }
  }
 }
 return (1);
}

/*
 * Parallel H2 Blocked with peeling
 *
 * Same forward solve as H2LeveledBlockedLsolve, but the final level is
 * "peeled" off the OpenMP worksharing loop and executed serially while MKL
 * is allowed `threads` BLAS threads instead.
 */
#undef MKL_BLAS
int H2LeveledBlockedLsolve_Peeled(int n, size_t *Lp, int *Li, double *Lx,
                                  int NNZ, size_t *Li_ptr, int *col2sup,
                                  int *sup2col, int supNo, double *x,
                                  int levels, int *levelPtr, int *levelSet,
                                  int parts, int *parPtr, int *partition,
                                  int chunk, int threads) {
 //int chunk = 70;
 double one[2], zero[2];
 one[0] = 1.0; /* ALPHA for *syrk, *herk, *gemm, and *trsm */
 one[1] = 0.;
 zero[0] = 0.; /* BETA for *syrk, *herk, and *gemm */
 zero[1] = 0.;
 int ione = 1;
 //double *tempVec = new double[n]();
 //MKL_Domain_Set_Num_Threads(1, MKL_DOMAIN_BLAS);
 double *tempVec;
 if (!Lp || !Li || !x) return (0); /* check inputs */
 // every level except the last: parallel across partitions
 for (int i1 = 0; i1 < levels - 1; ++i1) {
  int j1 = 0;
#pragma omp parallel //shared(lValues)//private(map, contribs)
  {
#pragma omp for schedule(static) private(j1, tempVec)
   for (j1 = levelPtr[i1]; j1 < levelPtr[i1 + 1]; ++j1) {
    //tempVec = new double[n]();
    tempVec = (double *) calloc(n, sizeof(double));
    for (int k1 = parPtr[j1]; k1 < parPtr[j1 + 1]; ++k1) {
     int i = partition[k1];
     int curCol = sup2col[i];
     int nxtCol = sup2col[i + 1];
     int supWdt = nxtCol - curCol;
     int nSupR = Li_ptr[nxtCol] - Li_ptr[curCol];//row size of supernode
     double *Ltrng = &Lx[Lp[curCol]];//first nnz of current supernode
     //lSolve_dense(nSupR,supWdt,Ltrng,&x[curCol]);
#ifdef MKL_BLAS
     dtrsm("L", "L", "N", "N", &supWdt,&ione,one,Ltrng,
           &nSupR,&x[curCol],&n);
#else
     dlsolve_blas_nonUnit(nSupR, supWdt, Ltrng, &x[curCol]);
#endif
     Ltrng = &Lx[Lp[curCol] + supWdt];//first nnz of below diagonal
     //matVector(nSupR,nSupR-supWdt,supWdt,Ltrng,&x[curCol],tempVec);
#ifdef MKL_BLAS
     int tmpRow=nSupR - supWdt;
     dgemv("N",&tmpRow,&supWdt,one,Ltrng,&nSupR,&x[curCol],&ione,
           zero,tempVec,&ione);
#else
     dmatvec_blas(nSupR, nSupR - supWdt, supWdt, Ltrng, &x[curCol], tempVec);
#endif
     // NOTE(review): this variant wraps the scatter in a critical section
     // AND uses atomics inside it — presumably only one of the two is
     // required; confirm before removing either.
#pragma omp critical
     for (int l = Li_ptr[curCol] + supWdt, k = 0; l < Li_ptr[nxtCol];
          ++l, ++k) {
#pragma omp atomic
      x[Li[l]] -= tempVec[k];
      tempVec[k] = 0;
     }
    }
    free(tempVec);
   }
  }
 }
#define MKL_BLAS
 // peeled last level: serial outer loop, multi-threaded BLAS
 MKL_Domain_Set_Num_Threads(threads, MKL_DOMAIN_BLAS);
 //for (int i1 = 0; i1 < levels ; ++i1) {
 int i1 = levels - 1;
 int j1 = 0;
//#pragma omp parallel //shared(lValues)//private(map, contribs)
// {
//#pragma omp for schedule(auto) private(j1, tempVec)
 for (j1 = levelPtr[i1]; j1 < levelPtr[i1 + 1]; ++j1) {
  //tempVec = new double[n]();
  tempVec = (double *) calloc(n, sizeof(double));
  for (int k1 = parPtr[j1]; k1 < parPtr[j1 + 1]; ++k1) {
   int i = partition[k1];
   int curCol = sup2col[i];
   int nxtCol = sup2col[i + 1];
   int supWdt = nxtCol - curCol;
   int nSupR = Li_ptr[nxtCol] - Li_ptr[curCol];//row size of supernode
   double *Ltrng = &Lx[Lp[curCol]];//first nnz of current supernode
   //lSolve_dense(nSupR,supWdt,Ltrng,&x[curCol]);
#ifdef MKL_BLAS
   dtrsm("L", "L", "N", "N", &supWdt, &ione, one, Ltrng, &nSupR,
         &x[curCol], &n);
#else
   dlsolve_blas_nonUnit(nSupR, supWdt, Ltrng, &x[curCol]);
#endif
   Ltrng = &Lx[Lp[curCol] + supWdt];//first nnz of below diagonal
   //matVector(nSupR,nSupR-supWdt,supWdt,Ltrng,&x[curCol],tempVec);
#ifdef MKL_BLAS
   int tmpRow = nSupR - supWdt;
   dgemv("N", &tmpRow, &supWdt, one, Ltrng, &nSupR, &x[curCol], &ione,
         zero, tempVec, &ione);
#else
   dmatvec_blas(nSupR, nSupR - supWdt, supWdt, Ltrng, &x[curCol], tempVec);
#endif
   // serial section: no synchronization needed for the scatter
   // #pragma omp critical
   for (int l = Li_ptr[curCol] + supWdt, k = 0; l < Li_ptr[nxtCol];
        ++l, ++k) {
    //#pragma omp atomic
    x[Li[l]] -= tempVec[k];
    tempVec[k] = 0;
   }
  }
  free(tempVec);
 }
// }
//}
 return (1);
}
} // closes the enclosing scope opened above this excerpt
#endif //TRIANGOPENMP_TRIANGULAR_H
GB_binop__eq_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__eq_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__eq_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__eq_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_uint16) // A*D function (colscale): GB (_AxD__eq_uint16) // D*A function (rowscale): GB (_DxB__eq_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__eq_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__eq_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_uint16) // C=scalar+B GB (_bind1st__eq_uint16) // C=scalar+B' GB (_bind1st_tran__eq_uint16) // C=A+scalar GB (_bind2nd__eq_uint16) // C=A'+scalar GB (_bind2nd_tran__eq_uint16) // C type: bool // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define 
GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_UINT16 || GxB_NO_EQ_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__eq_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__eq_uint16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool 
D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__eq_uint16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__eq_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const 
GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__eq_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__eq_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__eq_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for 
(p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__eq_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__eq_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__eq_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
image.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % IIIII M M AAA GGGG EEEEE % % I MM MM A A G E % % I M M M AAAAA G GG EEE % % I M M A A G G E % % IIIII M M A A GGGG EEEEE % % % % % % MagickCore Image Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/animate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/compress.h" #include "MagickCore/constitute.h" #include "MagickCore/delegate.h" #include "MagickCore/display.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magic.h" #include "MagickCore/magick.h" #include "MagickCore/magick-private.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/semaphore.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/timer.h" #include "MagickCore/timer-private.h" #include "MagickCore/token.h" #include 
"MagickCore/token-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
#include "MagickCore/xwindow-private.h"

/*
  Constant declaration.
*/
const char
  BackgroundColor[] = "#ffffff",  /* white */
  BorderColor[] = "#dfdfdf",  /* gray */
  DefaultTileFrame[] = "15x15+3+3",
  DefaultTileGeometry[] = "120x120+4+3>",
  DefaultTileLabel[] = "%f\n%G\n%b",
  ForegroundColor[] = "#000",  /* black */
  LoadImageTag[] = "Load/Image",
  LoadImagesTag[] = "Load/Images",
  MatteColor[] = "#bdbdbd",  /* gray */
  PSDensityGeometry[] = "72.0x72.0",
  PSPageGeometry[] = "612x792",
  SaveImageTag[] = "Save/Image",
  SaveImagesTag[] = "Save/Images",
  TransparentColor[] = "#00000000";  /* transparent black */

const double
  DefaultResolution = 72.0;

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e I m a g e                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireImage() returns a pointer to an image structure initialized to
%  default values.
%
%  The format of the AcquireImage method is:
%
%      Image *AcquireImage(const ImageInfo *image_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: Many of the image default values are set from this
%      structure.  For example, filename, compression, depth, background
%      color, and others.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  const char
    *option;

  Image
    *image;

  MagickStatusType
    flags;

  /*
    Allocate image structure.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /* no NULL check: AcquireCriticalMemory presumably aborts on failure —
     confirm against its definition */
  image=(Image *) AcquireCriticalMemory(sizeof(*image));
  (void) memset(image,0,sizeof(*image));
  /*
    Initialize Image structure.
  */
  (void) CopyMagickString(image->magick,"MIFF",MagickPathExtent);
  image->storage_class=DirectClass;
  image->depth=MAGICKCORE_QUANTUM_DEPTH;
  image->colorspace=sRGBColorspace;
  image->rendering_intent=PerceptualIntent;
  image->gamma=1.000f/2.200f;  /* sRGB default gamma */
  image->chromaticity.red_primary.x=0.6400f;
  image->chromaticity.red_primary.y=0.3300f;
  image->chromaticity.red_primary.z=0.0300f;
  image->chromaticity.green_primary.x=0.3000f;
  image->chromaticity.green_primary.y=0.6000f;
  image->chromaticity.green_primary.z=0.1000f;
  image->chromaticity.blue_primary.x=0.1500f;
  image->chromaticity.blue_primary.y=0.0600f;
  image->chromaticity.blue_primary.z=0.7900f;
  image->chromaticity.white_point.x=0.3127f;
  image->chromaticity.white_point.y=0.3290f;
  image->chromaticity.white_point.z=0.3583f;
  image->interlace=NoInterlace;
  image->ticks_per_second=UndefinedTicksPerSecond;
  image->compose=OverCompositeOp;
  (void) QueryColorCompliance(MatteColor,AllCompliance,&image->matte_color,
    exception);
  (void) QueryColorCompliance(BackgroundColor,AllCompliance,
    &image->background_color,exception);
  (void) QueryColorCompliance(BorderColor,AllCompliance,&image->border_color,
    exception);
  (void) QueryColorCompliance(TransparentColor,AllCompliance,
    &image->transparent_color,exception);
  GetTimerInfo(&image->timer);
  image->cache=AcquirePixelCache(0);
  image->channel_mask=DefaultChannels;
  image->channel_map=AcquirePixelChannelMap();
  image->blob=CloneBlobInfo((BlobInfo *) NULL);
  image->timestamp=GetMagickTime();
  image->debug=IsEventLogging();
  image->reference_count=1;
  image->semaphore=AcquireSemaphoreInfo();
  image->signature=MagickCoreSignature;
  if (image_info == (ImageInfo *) NULL)
    return(image);
  /*
    Transfer image info.
  */
  SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
    MagickFalse);
  (void) CopyMagickString(image->filename,image_info->filename,
    MagickPathExtent);
  (void) CopyMagickString(image->magick_filename,image_info->filename,
    MagickPathExtent);
  (void) CopyMagickString(image->magick,image_info->magick,MagickPathExtent);
  if (image_info->size != (char *) NULL)
    {
      (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
      image->columns=image->extract_info.width;
      image->rows=image->extract_info.height;
      image->offset=image->extract_info.x;
      image->extract_info.x=0;
      image->extract_info.y=0;
    }
  if (image_info->extract != (char *) NULL)
    {
      RectangleInfo
        geometry;

      (void) memset(&geometry,0,sizeof(geometry));
      flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
      if (((flags & XValue) != 0) || ((flags & YValue) != 0))
        {
          image->extract_info=geometry;
          Swap(image->columns,image->extract_info.width);
          Swap(image->rows,image->extract_info.height);
        }
    }
  image->compression=image_info->compression;
  image->quality=image_info->quality;
  image->endian=image_info->endian;
  image->interlace=image_info->interlace;
  image->units=image_info->units;
  if (image_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      flags=ParseGeometry(image_info->density,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->resolution.x=geometry_info.rho;
      image->resolution.y=image->resolution.x;
      if ((flags & SigmaValue) != 0)
        image->resolution.y=geometry_info.sigma;
    }
  if (image_info->page != (char *) NULL)
    {
      char
        *geometry;

      image->page=image->extract_info;
      geometry=GetPageGeometry(image_info->page);
      (void) ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  if (image_info->depth != 0)
    image->depth=image_info->depth;
  image->dither=image_info->dither;
  image->matte_color=image_info->matte_color;
  image->background_color=image_info->background_color;
  image->border_color=image_info->border_color;
  image->transparent_color=image_info->transparent_color;
  image->ping=image_info->ping;
  image->progress_monitor=image_info->progress_monitor;
  image->client_data=image_info->client_data;
  if (image_info->cache != (void *) NULL)
    ClonePixelCacheMethods(image->cache,image_info->cache);
  /*
    Set all global options that map to per-image settings.
  */
  (void) SyncImageSettings(image_info,image,exception);
  /*
    Global options that are only set for new images.
  */
  option=GetImageOption(image_info,"delay");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      flags=ParseGeometry(option,&geometry_info);
      if ((flags & GreaterValue) != 0)
        {
          if (image->delay > (size_t) floor(geometry_info.rho+0.5))
            image->delay=(size_t) floor(geometry_info.rho+0.5);
        }
      else
        if ((flags & LessValue) != 0)
          {
            /* NOTE(review): this branch updates ticks_per_second rather
               than delay, unlike the GreaterValue branch above — confirm
               the asymmetry is intentional. */
            if (image->delay < (size_t) floor(geometry_info.rho+0.5))
              image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
          }
        else
          image->delay=(size_t) floor(geometry_info.rho+0.5);
      if ((flags & SigmaValue) != 0)
        image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
    }
  option=GetImageOption(image_info,"dispose");
  if (option != (const char *) NULL)
    image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
      MagickFalse,option);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e I m a g e I n f o                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireImageInfo() allocates the ImageInfo structure.
%
%  The format of the AcquireImageInfo method is:
%
%      ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
  ImageInfo
    *image_info;

  /* allocate then populate with default settings; no NULL check because
     AcquireCriticalMemory presumably aborts on failure — confirm */
  image_info=(ImageInfo *) AcquireCriticalMemory(sizeof(*image_info));
  GetImageInfo(image_info);
  return(image_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e N e x t I m a g e                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireNextImage() initializes the next image in a sequence to
%  default values.  The next member of image points to the newly allocated
%  image.  If there is a memory shortage, next is assigned NULL.
%
%  The format of the AcquireNextImage method is:
%
%      void AcquireNextImage(const ImageInfo *image_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: Many of the image default values are set from this
%      structure.  For example, filename, compression, depth, background
%      color, and others.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image,
  ExceptionInfo *exception)
{
  /*
    Allocate image structure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->next=AcquireImage(image_info,exception);
  if (GetNextImageInList(image) == (Image *) NULL)
    return;
  (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
    MagickPathExtent);
  if (image_info != (ImageInfo *) NULL)
    (void) CopyMagickString(GetNextImageInList(image)->filename,
      image_info->filename,MagickPathExtent);
  /* drop the freshly allocated blob and share the parent's instead */
  DestroyBlob(GetNextImageInList(image));
  image->next->blob=ReferenceBlob(image->blob);
  image->next->endian=image->endian;
  image->next->scene=image->scene+1;
  image->next->previous=image;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A p p e n d I m a g e s                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AppendImages() takes all images from the current image pointer to the end
%  of the image list and appends them to each other top-to-bottom if the
%  stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting effects how the image is justified in the
%  final image.
%
%  The format of the AppendImages method is:
%
%      Image *AppendImages(const Image *images,const MagickBooleanType stack,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o stack: A value other than 0 stacks the images top-to-bottom.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
  const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag  "Append/Image"

  CacheView
    *append_view;

  Image
    *append_image;

  MagickBooleanType
    homogeneous_colorspace,
    status;

  MagickOffsetType
    n;

  PixelTrait
    alpha_trait;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    depth,
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y,
    y_offset;

  /*
    Compute maximum area of appended area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  alpha_trait=images->alpha_trait;
  number_images=1;
  width=images->columns;
  height=images->rows;
  depth=images->depth;
  homogeneous_colorspace=MagickTrue;
  /*
    First pass: size the canvas.  Stacked: width is the max column count and
    heights accumulate; side-by-side: widths accumulate and height is the max.
    Alpha and depth are widened to cover every frame.
  */
  next=GetNextImageInList(images);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->depth > depth)
      depth=next->depth;
    if (next->colorspace != images->colorspace)
      homogeneous_colorspace=MagickFalse;
    if (next->alpha_trait != UndefinedPixelTrait)
      alpha_trait=BlendPixelTrait;
    number_images++;
    if (stack != MagickFalse)
      {
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        continue;
      }
    width+=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Append images.
  */
  append_image=CloneImage(images,width,height,MagickTrue,exception);
  if (append_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(append_image,DirectClass,exception) == MagickFalse)
    {
      append_image=DestroyImage(append_image);
      return((Image *) NULL);
    }
  /* Mixed colorspaces are reconciled by converting the canvas to sRGB. */
  if (homogeneous_colorspace == MagickFalse)
    (void) SetImageColorspace(append_image,sRGBColorspace,exception);
  append_image->depth=depth;
  append_image->alpha_trait=alpha_trait;
  append_image->page=images->page;
  (void) SetImageBackgroundColor(append_image,exception);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  next=images;
  append_view=AcquireAuthenticCacheView(append_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    CacheView
      *image_view;

    MagickBooleanType
      proceed;

    /*
      Justify this frame within its slot per the frame's gravity setting.
    */
    SetGeometry(append_image,&geometry);
    GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
    if (stack != MagickFalse)
      x_offset-=geometry.x;
    else
      y_offset-=geometry.y;
    image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(status) \
      magick_number_threads(next,next,next->rows,1)
#endif
    for (y=0; y < (ssize_t) next->rows; y++)
    {
      MagickBooleanType
        sync;

      PixelInfo
        pixel;

      register const Quantum
        *magick_restrict p;

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      /* A failure on any row cancels the remaining rows cooperatively. */
      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
        next->columns,1,exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      GetPixelInfo(next,&pixel);
      /* Copy via PixelInfo so differing channel layouts are reconciled. */
      for (x=0; x < (ssize_t) next->columns; x++)
      {
        GetPixelInfoPixel(next,p,&pixel);
        SetPixelViaPixelInfo(append_image,&pixel,q);
        p+=GetPixelChannels(next);
        q+=GetPixelChannels(append_image);
      }
      sync=SyncCacheViewAuthenticPixels(append_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    image_view=DestroyCacheView(image_view);
    /* Advance the insertion point for the next frame. */
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) next->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) next->rows;
      }
    proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    next=GetNextImageInList(next);
  }
  append_view=DestroyCacheView(append_view);
  if (status == MagickFalse)
    append_image=DestroyImage(append_image);
  return(append_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C a t c h I m a g e E x c e p t i o n                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CatchImageException() returns if no exceptions are found in the image
%  sequence, otherwise it determines the most severe exception and reports
%  it as a warning or error depending on the severity.
%
%  The format of the CatchImageException method is:
%
%      ExceptionType CatchImageException(Image *image)
%
%  A description of each parameter follows:
%
%    o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
  ExceptionInfo
    *exception;

  ExceptionType
    severity;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    NOTE(review): the exception inspected here is freshly acquired rather
    than taken from the image, so severity appears to always come back as
    the default -- confirm this matches the intended (legacy) contract.
  */
  exception=AcquireExceptionInfo();
  CatchException(exception);
  severity=exception->severity;
  exception=DestroyExceptionInfo(exception);
  return(severity);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l i p I m a g e P a t h                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClipImagePath() sets the image clip mask based on any clipping path
%  information if it exists.
% % The format of the ClipImagePath method is: % % MagickBooleanType ClipImagePath(Image *image,const char *pathname, % const MagickBooleanType inside,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o pathname: name of clipping path resource. If name is preceded by #, use % clipping path numbered by name. % % o inside: if non-zero, later operations take effect inside clipping path. % Otherwise later operations take effect outside clipping path. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ClipImage(Image *image,ExceptionInfo *exception) { return(ClipImagePath(image,"#1",MagickTrue,exception)); } MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname, const MagickBooleanType inside,ExceptionInfo *exception) { #define ClipImagePathTag "ClipPath/Image" char *property; const char *value; Image *clip_mask; ImageInfo *image_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pathname != NULL); property=AcquireString(pathname); (void) FormatLocaleString(property,MagickPathExtent,"8BIM:1999,2998:%s", pathname); value=GetImageProperty(image,property,exception); property=DestroyString(property); if (value == (const char *) NULL) { ThrowFileException(exception,OptionError,"NoClipPathDefined", image->filename); return(MagickFalse); } image_info=AcquireImageInfo(); (void) CopyMagickString(image_info->filename,image->filename, MagickPathExtent); (void) ConcatenateMagickString(image_info->filename,pathname, MagickPathExtent); clip_mask=BlobToImage(image_info,value,strlen(value),exception); image_info=DestroyImageInfo(image_info); if (clip_mask == (Image *) NULL) return(MagickFalse); if (clip_mask->storage_class == PseudoClass) { (void) SyncImage(clip_mask,exception); if 
(SetImageStorageClass(clip_mask,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (inside == MagickFalse) (void) NegateImage(clip_mask,MagickFalse,exception); (void) FormatLocaleString(clip_mask->magick_filename,MagickPathExtent, "8BIM:1999,2998:%s\nPS",pathname); (void) SetImageMask(image,WritePixelMask,clip_mask,exception); clip_mask=DestroyImage(clip_mask); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImage() copies an image and returns the copy as a new image object. % % If the specified columns and rows is 0, an exact copy of the image is % returned, otherwise the pixel data is undefined and must be initialized % with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On % failure, a NULL image is returned and exception describes the reason for the % failure. % % The format of the CloneImage method is: % % Image *CloneImage(const Image *image,const size_t columns, % const size_t rows,const MagickBooleanType orphan, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the cloned image. % % o rows: the number of rows in the cloned image. % % o detach: With a value other than 0, the cloned image is detached from % its parent I/O stream. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CloneImage(const Image *image,const size_t columns, const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception) { Image *clone_image; double scale; size_t length; /* Clone the image. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((image->columns == 0) || (image->rows == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError, "NegativeOrZeroImageSize","`%s'",image->filename); return((Image *) NULL); } clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image)); (void) memset(clone_image,0,sizeof(*clone_image)); clone_image->signature=MagickCoreSignature; clone_image->storage_class=image->storage_class; clone_image->number_channels=image->number_channels; clone_image->number_meta_channels=image->number_meta_channels; clone_image->metacontent_extent=image->metacontent_extent; clone_image->colorspace=image->colorspace; clone_image->alpha_trait=image->alpha_trait; clone_image->channels=image->channels; clone_image->mask_trait=image->mask_trait; clone_image->columns=image->columns; clone_image->rows=image->rows; clone_image->dither=image->dither; clone_image->image_info=CloneImageInfo(image->image_info); (void) CloneImageProfiles(clone_image,image); (void) CloneImageProperties(clone_image,image); (void) CloneImageArtifacts(clone_image,image); GetTimerInfo(&clone_image->timer); if (image->ascii85 != (void *) NULL) Ascii85Initialize(clone_image); clone_image->extent=image->extent; clone_image->magick_columns=image->magick_columns; clone_image->magick_rows=image->magick_rows; clone_image->type=image->type; clone_image->channel_mask=image->channel_mask; clone_image->channel_map=ClonePixelChannelMap(image->channel_map); (void) CopyMagickString(clone_image->magick_filename,image->magick_filename, MagickPathExtent); (void) CopyMagickString(clone_image->magick,image->magick,MagickPathExtent); (void) CopyMagickString(clone_image->filename,image->filename, MagickPathExtent); 
clone_image->progress_monitor=image->progress_monitor; clone_image->client_data=image->client_data; clone_image->reference_count=1; clone_image->next=image->next; clone_image->previous=image->previous; clone_image->list=NewImageList(); if (detach == MagickFalse) clone_image->blob=ReferenceBlob(image->blob); else { clone_image->next=NewImageList(); clone_image->previous=NewImageList(); clone_image->blob=CloneBlobInfo((BlobInfo *) NULL); } clone_image->ping=image->ping; clone_image->debug=IsEventLogging(); clone_image->semaphore=AcquireSemaphoreInfo(); if (image->colormap != (PixelInfo *) NULL) { /* Allocate and copy the image colormap. */ clone_image->colors=image->colors; length=(size_t) image->colors; clone_image->colormap=(PixelInfo *) AcquireQuantumMemory(length+1, sizeof(*clone_image->colormap)); if (clone_image->colormap == (PixelInfo *) NULL) { clone_image=DestroyImage(clone_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memcpy(clone_image->colormap,image->colormap,length* sizeof(*clone_image->colormap)); } if ((columns == 0) || (rows == 0)) { if (image->montage != (char *) NULL) (void) CloneString(&clone_image->montage,image->montage); if (image->directory != (char *) NULL) (void) CloneString(&clone_image->directory,image->directory); clone_image->cache=ReferencePixelCache(image->cache); return(clone_image); } scale=1.0; if (image->columns != 0) scale=(double) columns/(double) image->columns; clone_image->page.width=(size_t) floor(scale*image->page.width+0.5); clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5); clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5); scale=1.0; if (image->rows != 0) scale=(double) rows/(double) image->rows; clone_image->page.height=(size_t) floor(scale*image->page.height+0.5); clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5); clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5); clone_image->cache=ClonePixelCache(image->cache); if 
(SetImageExtent(clone_image,columns,rows,exception) == MagickFalse) clone_image=DestroyImage(clone_image); return(clone_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageInfo() makes a copy of the given image info structure. If % NULL is specified, a new image info structure is created initialized to % default values. % % The format of the CloneImageInfo method is: % % ImageInfo *CloneImageInfo(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info) { ImageInfo *clone_info; clone_info=AcquireImageInfo(); if (image_info == (ImageInfo *) NULL) return(clone_info); clone_info->compression=image_info->compression; clone_info->temporary=image_info->temporary; clone_info->adjoin=image_info->adjoin; clone_info->antialias=image_info->antialias; clone_info->scene=image_info->scene; clone_info->number_scenes=image_info->number_scenes; clone_info->depth=image_info->depth; if (image_info->size != (char *) NULL) (void) CloneString(&clone_info->size,image_info->size); if (image_info->extract != (char *) NULL) (void) CloneString(&clone_info->extract,image_info->extract); if (image_info->scenes != (char *) NULL) (void) CloneString(&clone_info->scenes,image_info->scenes); if (image_info->page != (char *) NULL) (void) CloneString(&clone_info->page,image_info->page); clone_info->interlace=image_info->interlace; clone_info->endian=image_info->endian; clone_info->units=image_info->units; clone_info->quality=image_info->quality; if (image_info->sampling_factor != (char *) NULL) (void) CloneString(&clone_info->sampling_factor, image_info->sampling_factor); if (image_info->server_name != (char *) NULL) (void) CloneString(&clone_info->server_name,image_info->server_name); if 
(image_info->font != (char *) NULL) (void) CloneString(&clone_info->font,image_info->font); if (image_info->texture != (char *) NULL) (void) CloneString(&clone_info->texture,image_info->texture); if (image_info->density != (char *) NULL) (void) CloneString(&clone_info->density,image_info->density); clone_info->pointsize=image_info->pointsize; clone_info->fuzz=image_info->fuzz; clone_info->matte_color=image_info->matte_color; clone_info->background_color=image_info->background_color; clone_info->border_color=image_info->border_color; clone_info->transparent_color=image_info->transparent_color; clone_info->dither=image_info->dither; clone_info->monochrome=image_info->monochrome; clone_info->colorspace=image_info->colorspace; clone_info->type=image_info->type; clone_info->orientation=image_info->orientation; clone_info->ping=image_info->ping; clone_info->verbose=image_info->verbose; clone_info->progress_monitor=image_info->progress_monitor; clone_info->client_data=image_info->client_data; clone_info->cache=image_info->cache; if (image_info->cache != (void *) NULL) clone_info->cache=ReferencePixelCache(image_info->cache); if (image_info->profile != (void *) NULL) clone_info->profile=(void *) CloneStringInfo((StringInfo *) image_info->profile); SetImageInfoFile(clone_info,image_info->file); SetImageInfoBlob(clone_info,image_info->blob,image_info->length); clone_info->stream=image_info->stream; clone_info->custom_stream=image_info->custom_stream; (void) CopyMagickString(clone_info->magick,image_info->magick, MagickPathExtent); (void) CopyMagickString(clone_info->unique,image_info->unique, MagickPathExtent); (void) CopyMagickString(clone_info->filename,image_info->filename, MagickPathExtent); clone_info->channel=image_info->channel; (void) CloneImageOptions(clone_info,image_info); clone_info->debug=IsEventLogging(); clone_info->signature=image_info->signature; return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % 
%                                                                             %
%   C o p y I m a g e P i x e l s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CopyImagePixels() copies pixels from the source image as defined by the
%  geometry the destination image at the specified offset.
%
%  The format of the CopyImagePixels method is:
%
%      MagickBooleanType CopyImagePixels(Image *image,const Image *source_image,
%        const RectangleInfo *geometry,const OffsetInfo *offset,
%        ExceptionInfo *exception);
%
%  A description of each parameter follows:
%
%    o image: the destination image.
%
%    o source_image: the source image.
%
%    o geometry: define the dimensions of the source pixel rectangle.
%
%    o offset: define the offset in the destination image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
  const Image *source_image,const RectangleInfo *geometry,
  const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag  "Copy/Image"

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(source_image != (Image *) NULL);
  assert(geometry != (RectangleInfo *) NULL);
  assert(offset != (OffsetInfo *) NULL);
  /* The destination rectangle must lie entirely within the image bounds. */
  if ((offset->x < 0) || (offset->y < 0) ||
      ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
      ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
    ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
      image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Copy image pixels.
  */
  status=MagickTrue;
  progress=0;
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,source_image,geometry->height,1)
#endif
  for (y=0; y < (ssize_t) geometry->height; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    /* A failure on any row cancels the remaining rows cooperatively. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
      geometry->width,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
      geometry->width,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) geometry->width; x++)
    {
      register ssize_t
        i;

      /*
        Copy only channels that exist in both images and are flagged for
        update in the destination.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            ((traits & UpdatePixelTrait) == 0) ||
            (source_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(image,channel,p[i],q);
      }
      p+=GetPixelChannels(source_image);
      q+=GetPixelChannels(image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CopyImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImage() dereferences an image, deallocating memory associated with
%  the image if the reference count becomes zero.
%
%  The format of the DestroyImage method is:
%
%      Image *DestroyImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Image *DestroyImage(Image *image)
{
  MagickBooleanType
    destroy;

  /*
    Dereference image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Decrement under the image semaphore; only the thread that drops the
    count to zero proceeds to tear the image down.
  */
  destroy=MagickFalse;
  LockSemaphoreInfo(image->semaphore);
  image->reference_count--;
  if (image->reference_count == 0)
    destroy=MagickTrue;
  UnlockSemaphoreInfo(image->semaphore);
  if (destroy == MagickFalse)
    return((Image *) NULL);
  /*
    Destroy image.
  */
  DestroyImagePixels(image);
  image->channel_map=DestroyPixelChannelMap(image->channel_map);
  if (image->montage != (char *) NULL)
    image->montage=DestroyString(image->montage);
  if (image->directory != (char *) NULL)
    image->directory=DestroyString(image->directory);
  if (image->colormap != (PixelInfo *) NULL)
    image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  if (image->geometry != (char *) NULL)
    image->geometry=DestroyString(image->geometry);
  DestroyImageProfiles(image);
  DestroyImageProperties(image);
  DestroyImageArtifacts(image);
  if (image->ascii85 != (Ascii85Info *) NULL)
    image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
  if (image->image_info != (ImageInfo *) NULL)
    image->image_info=DestroyImageInfo(image->image_info);
  DestroyBlob(image);
  if (image->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&image->semaphore);
  /* Invalidate the signature so a stale pointer fails the asserts above. */
  image->signature=(~MagickCoreSignature);
  image=(Image *) RelinquishMagickMemory(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e I n f o                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageInfo() deallocates memory associated with an ImageInfo
%  structure.
%
%  The format of the DestroyImageInfo method is:
%
%      ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /* Release each owned string member that was set. */
  if (image_info->size != (char *) NULL)
    image_info->size=DestroyString(image_info->size);
  if (image_info->extract != (char *) NULL)
    image_info->extract=DestroyString(image_info->extract);
  if (image_info->scenes != (char *) NULL)
    image_info->scenes=DestroyString(image_info->scenes);
  if (image_info->page != (char *) NULL)
    image_info->page=DestroyString(image_info->page);
  if (image_info->sampling_factor != (char *) NULL)
    image_info->sampling_factor=DestroyString(
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    image_info->server_name=DestroyString(
      image_info->server_name);
  if (image_info->font != (char *) NULL)
    image_info->font=DestroyString(image_info->font);
  if (image_info->texture != (char *) NULL)
    image_info->texture=DestroyString(image_info->texture);
  if (image_info->density != (char *) NULL)
    image_info->density=DestroyString(image_info->density);
  /* Drop this holder's reference on the shared pixel cache. */
  if (image_info->cache != (void *) NULL)
    image_info->cache=DestroyPixelCache(image_info->cache);
  if (image_info->profile != (StringInfo *) NULL)
    image_info->profile=(void *) DestroyStringInfo((StringInfo *)
      image_info->profile);
  DestroyImageOptions(image_info);
  /* Invalidate the signature so a stale pointer fails the asserts above. */
  image_info->signature=(~MagickCoreSignature);
  image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
  return(image_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i s a s s o c i a t e I m a g e S t r e a m                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DisassociateImageStream() disassociates the image stream.  It checks if the
%  blob of the specified image is referenced by other images. If the reference
%  count is higher then 1 a new blob is assigned to the specified image.
%
%  The format of the DisassociateImageStream method is:
%
%      void DisassociateImageStream(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* The blob module performs the actual copy-on-shared-reference work. */
  DisassociateBlob(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e I n f o                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageInfo() initializes image_info to default values.
%
%  The format of the GetImageInfo method is:
%
%      void GetImageInfo(ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *synchronize;

  ExceptionInfo
    *exception;

  /*
    File and image dimension members.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info != (ImageInfo *) NULL);
  (void) memset(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  /* Honor the MAGICK_SYNCHRONIZE environment override when present. */
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      image_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /*
    Resolve the default background, border, matte, and transparent colors;
    any lookup warnings are discarded with the temporary exception.
  */
  exception=AcquireExceptionInfo();
  (void) QueryColorCompliance(BackgroundColor,AllCompliance,
    &image_info->background_color,exception);
  (void) QueryColorCompliance(BorderColor,AllCompliance,
    &image_info->border_color,exception);
  (void) QueryColorCompliance(MatteColor,AllCompliance,&image_info->matte_color,
    exception);
  (void) QueryColorCompliance(TransparentColor,AllCompliance,
    &image_info->transparent_color,exception);
  exception=DestroyExceptionInfo(exception);
  image_info->debug=IsEventLogging();
  image_info->signature=MagickCoreSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e I n f o F i l e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageInfoFile() returns the image info file member.
%
%  The format of the GetImageInfoFile method is:
%
%      FILE *GetImageInfoFile(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
  /* Trivial accessor for the stdio stream attached to the image info. */
  return(image_info->file);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e M a s k                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageMask() returns the mask associated with the image.
%
%  The format of the GetImageMask method is:
%
%      Image *GetImageMask(const Image *image,const PixelMask type,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: the mask type, ReadPixelMask or WritePixelMask.
%
*/
MagickExport Image *GetImageMask(const Image *image,const PixelMask type,
  ExceptionInfo *exception)
{
  CacheView
    *mask_view,
    *image_view;

  Image
    *mask_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Get image mask.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    Return NULL when the requested mask channel is not active on the image;
    any type other than read/write selects the composite mask.
  */
  switch (type)
  {
    case ReadPixelMask:
    {
      if ((image->channels & ReadMaskChannel) == 0)
        return((Image *) NULL);
      break;
    }
    case WritePixelMask:
    {
      if ((image->channels & WriteMaskChannel) == 0)
        return((Image *) NULL);
      break;
    }
    default:
    {
      if ((image->channels & CompositeMaskChannel) == 0)
        return((Image *) NULL);
      break;
    }
  }
  /*
    Materialize the mask channel as a standalone grayscale image.
  */
  mask_image=AcquireImage((ImageInfo *) NULL,exception);
  status=SetImageExtent(mask_image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(mask_image));
  status=MagickTrue;
  mask_image->alpha_trait=UndefinedPixelTrait;
  (void) SetImageColorspace(mask_image,GRAYColorspace,exception);
  image_view=AcquireVirtualCacheView(image,exception);
  mask_view=AcquireAuthenticCacheView(mask_image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      switch (type)
      {
        case ReadPixelMask:
        {
          SetPixelGray(mask_image,GetPixelReadMask(image,p),q);
          break;
        }
        case WritePixelMask:
        {
          SetPixelGray(mask_image,GetPixelWriteMask(image,p),q);
          break;
        }
        default:
        {
          SetPixelGray(mask_image,GetPixelCompositeMask(image,p),q);
          break;
        }
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(mask_image);
    }
    if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  mask_view=DestroyCacheView(mask_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    mask_image=DestroyImage(mask_image);
  return(mask_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e R e f e r e n c e C o u n t                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageReferenceCount() returns the image reference count.
%
%  The format of the GetReferenceCount method is:
%
%      ssize_t GetImageReferenceCount(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    reference_count;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Read under the image semaphore for a consistent snapshot. */
  LockSemaphoreInfo(image->semaphore);
  reference_count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(reference_count);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i r t u a l P i x e l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
%  image.  A virtual pixel is any pixel access that is outside the boundaries
%  of the image cache.
%
%  The format of the GetImageVirtualPixelMethod() method is:
%
%      VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); return(GetPixelCacheVirtualMethod(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n t e r p r e t I m a g e F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InterpretImageFilename() interprets embedded characters in an image filename. % The filename length is returned. % % The format of the InterpretImageFilename method is: % % size_t InterpretImageFilename(const ImageInfo *image_info,Image *image, % const char *format,int value,char *filename,ExceptionInfo *exception) % % A description of each parameter follows. % % o image_info: the image info.. % % o image: the image. % % o format: A filename describing the format to use to write the numeric % argument. Only the first numeric format identifier is replaced. % % o value: Numeric value to substitute into format filename. % % o filename: return the formatted filename in this character buffer. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename,
  ExceptionInfo *exception)
{
  char
    *q;

  int
    c;

  MagickBooleanType
    canonical;

  register const char
    *p;

  ssize_t
    field_width,
    offset;

  /*
    Expand %d/%o/%x (scene number) and %[filename:...] escapes in a
    filename template.  `offset' tracks how much the expansions have
    shifted subsequent text relative to the original format string.
  */
  canonical=MagickFalse;
  offset=0;
  (void) CopyMagickString(filename,format,MagickPathExtent);
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    /* "%%" is a literal percent; skip it here, collapsed at the end */
    if (*q == '%')
      {
        p=q+1;
        continue;
      }
    field_width=0;
    if (*q == '0')
      field_width=(ssize_t) strtol(q,&q,10);
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        /*
          Numeric escape: temporarily NUL-terminate the specifier so it
          can be handed to FormatLocaleString as its own format string,
          then restore the byte and append the remainder.
        */
        q++;
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format-offset),(size_t)
          (MagickPathExtent-(p-format-offset)),p,value);
        offset+=(4-field_width);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MagickPathExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MagickPathExtent];

        const char
          *option;

        register char
          *r;

        register ssize_t
          i;

        ssize_t
          depth;

        /*
          Image option: copy the bracketed expression into `pattern',
          honoring nested brackets via `depth'.
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        depth=1;
        r=q+1;
        for (i=0; (i < (MagickPathExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        /* only "filename:" properties participate in filename expansion */
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        /*
          Resolve the property: image property, then image artifact, then
          the global image option — first hit wins.
        */
        option=(const char *) NULL;
        if (image != (Image *) NULL)
          option=GetImageProperty(image,pattern,exception);
        if ((option == (const char *) NULL) && (image != (Image *) NULL))
          option=GetImageArtifact(image,pattern);
        if ((option == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          option=GetImageOption(image_info,pattern);
        if (option == (const char *) NULL)
          break;
        /* splice the option value over the "%[...]" escape */
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format-offset),option,(size_t)
          (MagickPathExtent-(p-format-offset)));
        offset+=strlen(pattern)-strlen(option)+3;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MagickPathExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MagickPathExtent);
  else
    /* collapse any remaining "%%" into a single '%' */
    for (q=filename; *q != '\0'; q++)
      if ((*q == '%') && (*(q+1) == '%'))
        (void) CopyMagickString(q,q+1,(size_t) (MagickPathExtent-(q-
          filename)));
  return(strlen(filename));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                           %
%                                                                           %
%                                                                           %
%   I s H i g h D y n a m i c R a n g e I m a g e                           %
%                                                                           %
%                                                                           %
%                                                                           %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
%  non-integer or exceeds the bounds of the quantum depth (e.g. for Q16
%  0..65535).
%
%  The format of the IsHighDynamicRangeImage method is:
%
%      MagickBooleanType IsHighDynamicRangeImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image, ExceptionInfo *exception) { #if !defined(MAGICKCORE_HDRI_SUPPORT) (void) image; (void) exception; return(MagickFalse); #else CacheView *image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double pixel; PixelTrait traits; traits=GetPixelChannelTraits(image,(PixelChannel) i); if (traits == UndefinedPixelTrait) continue; pixel=(double) p[i]; if ((pixel < 0.0) || (pixel > QuantumRange) || (pixel != (double) ((QuantumAny) pixel))) break; } p+=GetPixelChannels(image); if (i < (ssize_t) GetPixelChannels(image)) status=MagickFalse; } if (x < (ssize_t) image->columns) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status != MagickFalse ? MagickFalse : MagickTrue); #endif } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e O b j e c t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageObject() returns MagickTrue if the image sequence contains a valid % set of image objects. 
% % The format of the IsImageObject method is: % % MagickBooleanType IsImageObject(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType IsImageObject(const Image *image) { register const Image *p; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); for (p=image; p != (Image *) NULL; p=GetNextImageInList(p)) if (p->signature != MagickCoreSignature) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s T a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsTaintImage() returns MagickTrue any pixel in the image has been altered % since it was first constituted. % % The format of the IsTaintImage method is: % % MagickBooleanType IsTaintImage(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ MagickExport MagickBooleanType IsTaintImage(const Image *image) { char magick[MagickPathExtent], filename[MagickPathExtent]; register const Image *p; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); (void) CopyMagickString(magick,image->magick,MagickPathExtent); (void) CopyMagickString(filename,image->filename,MagickPathExtent); for (p=image; p != (Image *) NULL; p=GetNextImageInList(p)) { if (p->taint != MagickFalse) return(MagickTrue); if (LocaleCompare(p->magick,magick) != 0) return(MagickTrue); if (LocaleCompare(p->filename,filename) != 0) return(MagickTrue); } return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o d i f y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModifyImage() ensures that there is only a single reference to the image % to be modified, updating the provided image pointer to point to a clone of % the original image if necessary. % % The format of the ModifyImage method is: % % MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType ModifyImage(Image **image, ExceptionInfo *exception) { Image *clone_image; assert(image != (Image **) NULL); assert(*image != (Image *) NULL); assert((*image)->signature == MagickCoreSignature); if ((*image)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename); if (GetImageReferenceCount(*image) <= 1) return(MagickTrue); clone_image=CloneImage(*image,0,0,MagickTrue,exception); LockSemaphoreInfo((*image)->semaphore); (*image)->reference_count--; UnlockSemaphoreInfo((*image)->semaphore); *image=clone_image; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w M a g i c k I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewMagickImage() creates a blank image canvas of the specified size and % background color. % % The format of the NewMagickImage method is: % % Image *NewMagickImage(const ImageInfo *image_info,const size_t width, % const size_t height,const PixelInfo *background, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the image width. % % o height: the image height. % % o background: the image color. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *NewMagickImage(const ImageInfo *image_info, const size_t width,const size_t height,const PixelInfo *background, ExceptionInfo *exception) { CacheView *image_view; Image *image; MagickBooleanType status; ssize_t y; assert(image_info != (const ImageInfo *) NULL); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image_info->signature == MagickCoreSignature); assert(background != (const PixelInfo *) NULL); image=AcquireImage(image_info,exception); image->columns=width; image->rows=height; image->colorspace=background->colorspace; image->alpha_trait=background->alpha_trait; image->fuzz=background->fuzz; image->depth=background->depth; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelViaPixelInfo(image,background,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e f e r e n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReferenceImage() increments the reference count associated with an image % returning a pointer to the image. 
% % The format of the ReferenceImage method is: % % Image *ReferenceImage(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *ReferenceImage(Image *image) { assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); LockSemaphoreInfo(image->semaphore); image->reference_count++; UnlockSemaphoreInfo(image->semaphore); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t I m a g e P a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImagePage() resets the image page canvas and position. % % The format of the ResetImagePage method is: % % MagickBooleanType ResetImagePage(Image *image,const char *page) % % A description of each parameter follows: % % o image: the image. % % o page: the relative page specification. 
% */ MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page) { MagickStatusType flags; RectangleInfo geometry; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); flags=ParseAbsoluteGeometry(page,&geometry); if ((flags & WidthValue) != 0) { if ((flags & HeightValue) == 0) geometry.height=geometry.width; image->page.width=geometry.width; image->page.height=geometry.height; } if ((flags & AspectValue) != 0) { if ((flags & XValue) != 0) image->page.x+=geometry.x; if ((flags & YValue) != 0) image->page.y+=geometry.y; } else { if ((flags & XValue) != 0) { image->page.x=geometry.x; if ((image->page.width == 0) && (geometry.x > 0)) image->page.width=image->columns+geometry.x; } if ((flags & YValue) != 0) { image->page.y=geometry.y; if ((image->page.height == 0) && (geometry.y > 0)) image->page.height=image->rows+geometry.y; } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImagePixels() reset the image pixels, that is, all the pixel components % are zereod. % % The format of the SetImage method is: % % MagickBooleanType ResetImagePixels(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType ResetImagePixels(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; size_t length; ssize_t y; void *pixels; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); pixels=AcquirePixelCachePixels(image,&length,exception); if (pixels != (void *) NULL) { /* Reset in-core image pixels. */ (void) memset(pixels,0,length); return(MagickTrue); } /* Reset image pixels. */ status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { (void) memset(q,0,GetPixelChannels(image)*sizeof(Quantum)); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageAlpha() sets the alpha levels of the image. % % The format of the SetImageAlpha method is: % % MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o alpha: the level of transparency: 0 is fully transparent and QuantumRange % is fully opaque. 
% % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); image->alpha_trait=BlendPixelTrait; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(image,alpha,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e B a c k g r o u n d C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageBackgroundColor() initializes the image pixels to the image % background color. The background color is defined by the background_color % member of the image structure. % % The format of the SetImage method is: % % MagickBooleanType SetImageBackgroundColor(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType SetImageBackgroundColor(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; PixelInfo background; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if ((image->background_color.alpha != OpaqueAlpha) && (image->alpha_trait == UndefinedPixelTrait)) (void) SetImageAlphaChannel(image,OnAlphaChannel,exception); ConformPixelInfo(image,&image->background_color,&background,exception); /* Set image background color. */ status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelViaPixelInfo(image,&background,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C h a n n e l M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageChannelMask() sets the image channel mask from the specified channel % mask. % % The format of the SetImageChannelMask method is: % % ChannelType SetImageChannelMask(Image *image, % const ChannelType channel_mask) % % A description of each parameter follows: % % o image: the image. % % o channel_mask: the channel mask. 
%
*/
MagickExport ChannelType SetImageChannelMask(Image *image,
  const ChannelType channel_mask)
{
  /* thin wrapper; returns the previous channel mask */
  return(SetPixelChannelMask(image,channel_mask));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                           %
%                                                                           %
%                                                                           %
%   S e t I m a g e C o l o r                                               %
%                                                                           %
%                                                                           %
%                                                                           %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColor() sets the entire image canvas to the specified color.
%
%  The format of the SetImageColor method is:
%
%      MagickBooleanType SetImageColor(Image *image,const PixelInfo *color,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o background: the image color.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
  const PixelInfo *color,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  assert(color != (const PixelInfo *) NULL);
  /* adopt the fill color's colorspace and pixel traits */
  image->colorspace=color->colorspace;
  image->alpha_trait=color->alpha_trait;
  image->fuzz=color->fuzz;
  image->depth=color->depth;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,color,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                           %
%                                                                           %
%                                                                           %
%   S e t I m a g e S t o r a g e C l a s s                                 %
%                                                                           %
%                                                                           %
%                                                                           %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageStorageClass() sets the image class: DirectClass for true color
%  images or PseudoClass for colormapped images.
%
%  The format of the SetImageStorageClass method is:
%
%      MagickBooleanType SetImageStorageClass(Image *image,
%        const ClassType storage_class,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o storage_class: The image class.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
  const ClassType storage_class,ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image->storage_class=storage_class;
  /* the pixel cache must be re-synchronized after a class change */
  return(SyncImagePixelCache(image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                           %
%                                                                           %
%                                                                           %
%   S e t I m a g e E x t e n t                                             %
%                                                                           %
%                                                                           %
%                                                                           %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageExtent() sets the image size (i.e. columns & rows).
%
%  The format of the SetImageExtent method is:
%
%      MagickBooleanType SetImageExtent(Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: The image width in pixels.
%
%    o rows: The image height in pixels.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns, const size_t rows,ExceptionInfo *exception) { if ((columns == 0) || (rows == 0)) ThrowBinaryException(ImageError,"NegativeOrZeroImageSize",image->filename); image->columns=columns; image->rows=rows; if (image->depth == 0) { image->depth=8; (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageDepthNotSupported","`%s'",image->filename); } if (image->depth > (8*sizeof(MagickSizeType))) { image->depth=8*sizeof(MagickSizeType); (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageDepthNotSupported","`%s'",image->filename); } return(SyncImagePixelCache(image,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfo() initializes the 'magick' field of the ImageInfo structure. % It is set to a type of image format based on the prefix or suffix of the % filename. For example, 'ps:image' returns PS indicating a Postscript image. % JPEG is returned for this filename: 'image.jpg'. The filename prefix has % precendence over the suffix. Use an optional index enclosed in brackets % after a file name to specify a desired scene of a multi-resolution image % format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value % indicates success. % % The format of the SetImageInfo method is: % % MagickBooleanType SetImageInfo(ImageInfo *image_info, % const unsigned int frames,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o frames: the number of images you intend to write. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
  const unsigned int frames,ExceptionInfo *exception)
{
  char
    component[MagickPathExtent],
    magic[MagickPathExtent],
    *q;

  const MagicInfo
    *magic_info;

  const MagickInfo
    *magick_info;

  ExceptionInfo
    *sans_exception;

  Image
    *image;

  MagickBooleanType
    status;

  register const char
    *p;

  ssize_t
    count;

  /*
    Look for 'image.format' in filename.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  *component='\0';
  GetPathComponent(image_info->filename,SubimagePath,component);
  if (*component != '\0')
    {
      /*
        Look for scene specification (e.g. img0001.pcd[4]).
      */
      if (IsSceneGeometry(component,MagickFalse) == MagickFalse)
        {
          /* not a scene list; treat a geometry as an extract region */
          if (IsGeometry(component) != MagickFalse)
            (void) CloneString(&image_info->extract,component);
        }
      else
        {
          size_t
            first,
            last;

          /*
            Parse a comma-separated list of scene numbers and ranges
            (e.g. "2,4-7"), tracking the overall min scene and max scene.
          */
          (void) CloneString(&image_info->scenes,component);
          image_info->scene=StringToUnsignedLong(image_info->scenes);
          image_info->number_scenes=image_info->scene;
          p=image_info->scenes;
          for (q=(char *) image_info->scenes; *q != '\0'; p++)
          {
            while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
              p++;
            first=(size_t) strtol(p,&q,10);
            last=first;
            while (isspace((int) ((unsigned char) *q)) != 0)
              q++;
            if (*q == '-')
              last=(size_t) strtol(q+1,&q,10);
            if (first > last)
              Swap(first,last);
            if (first < image_info->scene)
              image_info->scene=first;
            if (last > image_info->number_scenes)
              image_info->number_scenes=last;
            p=q;
          }
          /* convert the max scene into a count relative to `scene' */
          image_info->number_scenes-=image_info->scene-1;
        }
    }
  *component='\0';
  if (*image_info->magick == '\0')
    GetPathComponent(image_info->filename,ExtensionPath,component);
#if defined(MAGICKCORE_ZLIB_DELEGATE)
  /* for compressed files, look at the extension under the .gz/.Z suffix */
  if (*component != '\0')
    if ((LocaleCompare(component,"gz") == 0) ||
        (LocaleCompare(component,"Z") == 0) ||
        (LocaleCompare(component,"svgz") == 0) ||
        (LocaleCompare(component,"wmz") == 0))
      {
        char
          path[MagickPathExtent];

        (void) CopyMagickString(path,image_info->filename,MagickPathExtent);
        path[strlen(path)-strlen(component)-1]='\0';
        GetPathComponent(path,ExtensionPath,component);
      }
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
  if (*component != '\0')
    if (LocaleCompare(component,"bz2") == 0)
      {
        char
          path[MagickPathExtent];

        (void) CopyMagickString(path,image_info->filename,MagickPathExtent);
        path[strlen(path)-strlen(component)-1]='\0';
        GetPathComponent(path,ExtensionPath,component);
      }
#endif
  image_info->affirm=MagickFalse;
  sans_exception=AcquireExceptionInfo();
  if ((*component != '\0') && (IsGlob(component) == MagickFalse))
    {
      MagickFormatType
        format_type;

      register ssize_t
        i;

      /* pseudo-formats that denote an explicit user action, not a codec */
      static const char
        *format_type_formats[] =
        {
          "AUTOTRACE",
          "BROWSE",
          "DCRAW",
          "EDIT",
          "LAUNCH",
          "MPEG:DECODE",
          "MPEG:ENCODE",
          "PRINT",
          "PS:ALPHA",
          "PS:CMYK",
          "PS:COLOR",
          "PS:GRAY",
          "PS:MONO",
          "SCAN",
          "SHOW",
          "WIN",
          (char *) NULL
        };

      /*
        User specified image format.
      */
      (void) CopyMagickString(magic,component,MagickPathExtent);
      LocaleUpper(magic);
      /*
        Look for explicit image formats.
      */
      format_type=UndefinedFormatType;
      magick_info=GetMagickInfo(magic,sans_exception);
      if ((magick_info != (const MagickInfo *) NULL) &&
          (magick_info->format_type != UndefinedFormatType))
        format_type=magick_info->format_type;
      i=0;
      while ((format_type == UndefinedFormatType) &&
             (format_type_formats[i] != (char *) NULL))
      {
        if ((*magic == *format_type_formats[i]) &&
            (LocaleCompare(magic,format_type_formats[i]) == 0))
          format_type=ExplicitFormatType;
        i++;
      }
      if (format_type == UndefinedFormatType)
        (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
      else
        if (format_type == ExplicitFormatType)
          {
            image_info->affirm=MagickTrue;
            (void) CopyMagickString(image_info->magick,magic,
              MagickPathExtent);
          }
      if (LocaleCompare(magic,"RGB") == 0)
        image_info->affirm=MagickFalse;  /* maybe SGI disguised as RGB */
    }
  /*
    Look for explicit 'format:image' in filename.
  */
  *magic='\0';
  GetPathComponent(image_info->filename,MagickPath,magic);
  if (*magic == '\0')
    {
      (void) CopyMagickString(magic,image_info->magick,MagickPathExtent);
      magick_info=GetMagickInfo(magic,sans_exception);
      if (frames == 0)
        GetPathComponent(image_info->filename,CanonicalPath,component);
      else
        GetPathComponent(image_info->filename,SubcanonicalPath,component);
      (void) CopyMagickString(image_info->filename,component,
        MagickPathExtent);
    }
  else
    {
      const DelegateInfo
        *delegate_info;

      /*
        User specified image format.
      */
      LocaleUpper(magic);
      magick_info=GetMagickInfo(magic,sans_exception);
      delegate_info=GetDelegateInfo(magic,"*",sans_exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        delegate_info=GetDelegateInfo("*",magic,sans_exception);
      if (((magick_info != (const MagickInfo *) NULL) ||
           (delegate_info != (const DelegateInfo *) NULL)) &&
          (IsMagickConflict(magic) == MagickFalse))
        {
          image_info->affirm=MagickTrue;
          (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
          GetPathComponent(image_info->filename,CanonicalPath,component);
          (void) CopyMagickString(image_info->filename,component,
            MagickPathExtent);
        }
    }
  sans_exception=DestroyExceptionInfo(sans_exception);
  if ((magick_info == (const MagickInfo *) NULL) ||
      (GetMagickEndianSupport(magick_info) == MagickFalse))
    image_info->endian=UndefinedEndian;
  if ((image_info->adjoin != MagickFalse) && (frames > 1))
    {
      /*
        Test for multiple image support (e.g. image%02d.png).
      */
      (void) InterpretImageFilename(image_info,(Image *) NULL,
        image_info->filename,(int) image_info->scene,component,exception);
      if ((LocaleCompare(component,image_info->filename) != 0) &&
          (strchr(component,'%') == (char *) NULL))
        image_info->adjoin=MagickFalse;
    }
  if ((image_info->adjoin != MagickFalse) && (frames > 0))
    {
      /*
        Some image formats do not support multiple frames per file.
      */
      magick_info=GetMagickInfo(magic,exception);
      if (magick_info != (const MagickInfo *) NULL)
        if (GetMagickAdjoin(magick_info) == MagickFalse)
          image_info->adjoin=MagickFalse;
    }
  if (image_info->affirm != MagickFalse)
    return(MagickTrue);
  if (frames == 0)
    {
      unsigned char
        *magick;

      size_t
        magick_size;

      /*
        Determine the image format from the first few bytes of the file.
      */
      magick_size=GetMagicPatternExtent(exception);
      if (magick_size == 0)
        return(MagickFalse);
      image=AcquireImage(image_info,exception);
      (void) CopyMagickString(image->filename,image_info->filename,
        MagickPathExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          image=DestroyImage(image);
          return(MagickFalse);
        }
      if ((IsBlobSeekable(image) == MagickFalse) ||
          (IsBlobExempt(image) != MagickFalse))
        {
          /*
            Copy image to seekable temporary file.
          */
          *component='\0';
          status=ImageToFile(image,component,exception);
          (void) CloseBlob(image);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          SetImageInfoFile(image_info,(FILE *) NULL);
          (void) CopyMagickString(image->filename,component,
            MagickPathExtent);
          status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          (void) CopyMagickString(image_info->filename,component,
            MagickPathExtent);
          image_info->temporary=MagickTrue;
        }
      magick=(unsigned char *) AcquireMagickMemory(magick_size);
      if (magick == (unsigned char *) NULL)
        {
          (void) CloseBlob(image);
          image=DestroyImage(image);
          return(MagickFalse);
        }
      (void) memset(magick,0,magick_size);
      count=ReadBlob(image,magick_size,magick);
      /* rewind so a subsequent reader sees the header bytes again */
      (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
      (void) CloseBlob(image);
      image=DestroyImage(image);
      /*
        Check magic cache.
      */
      sans_exception=AcquireExceptionInfo();
      magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
      magick=(unsigned char *) RelinquishMagickMemory(magick);
      if ((magic_info != (const MagicInfo *) NULL) &&
          (GetMagicName(magic_info) != (char *) NULL))
        {
          /*
            Try to use magick_info that was determined earlier by the
            extension.
          */
          if ((magick_info != (const MagickInfo *) NULL) &&
              (GetMagickUseExtension(magick_info) != MagickFalse) &&
              (LocaleCompare(magick_info->magick_module,GetMagicName(
                magic_info)) == 0))
            (void) CopyMagickString(image_info->magick,magick_info->name,
              MagickPathExtent);
          else
            {
              (void) CopyMagickString(image_info->magick,GetMagicName(
                magic_info),MagickPathExtent);
              magick_info=GetMagickInfo(image_info->magick,sans_exception);
            }
          if ((magick_info == (const MagickInfo *) NULL) ||
              (GetMagickEndianSupport(magick_info) == MagickFalse))
            image_info->endian=UndefinedEndian;
          sans_exception=DestroyExceptionInfo(sans_exception);
          return(MagickTrue);
        }
      magick_info=GetMagickInfo(image_info->magick,sans_exception);
      if ((magick_info == (const MagickInfo *) NULL) ||
          (GetMagickEndianSupport(magick_info) == MagickFalse))
        image_info->endian=UndefinedEndian;
      sans_exception=DestroyExceptionInfo(sans_exception);
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                           %
%                                                                           %
%                                                                           %
%   S e t I m a g e I n f o B l o b                                         %
%                                                                           %
%                                                                           %
%                                                                           %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfoBlob() sets the image info blob member.
%
%  The format of the SetImageInfoBlob method is:
%
%      void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
%        const size_t length)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o blob: the blob.
%
%    o length: the blob length.
% */
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
  const size_t length)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /* Only the pointer is stored; the caller retains ownership of the blob. */
  image_info->blob=(void *) blob;
  image_info->length=length;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e I n f o C u s t o m S t r e a m                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfoCustomStream() sets the image info custom stream handlers.
%
%  The format of the SetImageInfoCustomStream method is:
%
%      void SetImageInfoCustomStream(ImageInfo *image_info,
%        CustomStreamInfo *custom_stream)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o custom_stream: your custom stream methods.
%
*/
MagickExport void SetImageInfoCustomStream(ImageInfo *image_info,
  CustomStreamInfo *custom_stream)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /* Only the pointer is stored; the caller retains ownership. */
  image_info->custom_stream=(CustomStreamInfo *) custom_stream;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e I n f o F i l e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfoFile() sets the image info file member.
%
%  The format of the SetImageInfoFile method is:
%
%      void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o file: the file.
% */
MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /* The FILE handle is borrowed; the caller remains responsible for it. */
  image_info->file=file;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e M a s k                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageMask() associates a mask with the image.  The mask must be the same
%  dimensions as the image.
%
%  The format of the SetImageMask method is:
%
%      MagickBooleanType SetImageMask(Image *image,const PixelMask type,
%        const Image *mask,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: the mask type, ReadPixelMask or WritePixelMask.
%
%    o mask: the image mask.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMask(Image *image,const PixelMask type,
  const Image *mask,ExceptionInfo *exception)
{
  CacheView
    *mask_view,
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Set image mask.
*/ assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (mask == (const Image *) NULL) { switch (type) { case ReadPixelMask: { image->channels=(ChannelType) (image->channels & ~ReadMaskChannel); break; } case WritePixelMask: { image->channels=(ChannelType) (image->channels & ~WriteMaskChannel); } default: { image->channels=(ChannelType) (image->channels & ~CompositeMaskChannel); break; } } return(SyncImagePixelCache(image,exception)); } switch (type) { case ReadPixelMask: { image->channels=(ChannelType) (image->channels | ReadMaskChannel); break; } case WritePixelMask: { image->channels=(ChannelType) (image->channels | WriteMaskChannel); break; } default: { image->channels=(ChannelType) (image->channels | CompositeMaskChannel); break; } } if (SyncImagePixelCache(image,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; image->mask_trait=UpdatePixelTrait; mask_view=AcquireVirtualCacheView(mask,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(mask,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(mask_view,0,y,mask->columns,1,exception); q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType intensity; intensity=0.0; if ((x < (ssize_t) mask->columns) && (y < (ssize_t) mask->rows)) intensity=GetPixelIntensity(mask,p); switch (type) { case ReadPixelMask: { SetPixelReadMask(image,ClampToQuantum(intensity),q); break; } case WritePixelMask: { 
          SetPixelWriteMask(image,ClampToQuantum(intensity),q);
          break;
        }
        default:
        {
          SetPixelCompositeMask(image,ClampToQuantum(intensity),q);
          break;
        }
      }
      p+=GetPixelChannels(mask);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image->mask_trait=UndefinedPixelTrait;
  mask_view=DestroyCacheView(mask_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e R e g i o n M a s k                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageRegionMask() associates a mask with the image as defined by the
%  specified region.
%
%  The format of the SetImageRegionMask method is:
%
%      MagickBooleanType SetImageRegionMask(Image *image,const PixelMask type,
%        const RectangleInfo *region,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: the mask type, ReadPixelMask or WritePixelMask.
%
%    o geometry: the mask region.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageRegionMask(Image *image,
  const PixelMask type,const RectangleInfo *region,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Set image mask as defined by the region.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (region == (const RectangleInfo *) NULL)
    {
      /* NULL region: remove the requested mask channel from the image. */
      switch (type)
      {
        case ReadPixelMask:
        {
          image->channels=(ChannelType) (image->channels & ~ReadMaskChannel);
          break;
        }
        case WritePixelMask:
        {
          image->channels=(ChannelType) (image->channels & ~WriteMaskChannel);
          break;
        }
        default:
        {
          image->channels=(ChannelType) (image->channels &
            ~CompositeMaskChannel);
          break;
        }
      }
      return(SyncImagePixelCache(image,exception));
    }
  switch (type)
  {
    case ReadPixelMask:
    {
      image->channels=(ChannelType) (image->channels | ReadMaskChannel);
      break;
    }
    case WritePixelMask:
    {
      image->channels=(ChannelType) (image->channels | WriteMaskChannel);
      break;
    }
    default:
    {
      image->channels=(ChannelType) (image->channels | CompositeMaskChannel);
      break;
    }
  }
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  image->mask_trait=UpdatePixelTrait;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      Quantum
        pixel;

      /* pixels inside the region get 0, outside QuantumRange -- NOTE(review):
         confirm this polarity matches the intended mask semantics */
      pixel=QuantumRange;
      if (((x >= region->x) && (x < (region->x+(ssize_t) region->width))) &&
          ((y >= region->y) && (y < (region->y+(ssize_t) region->height))))
        pixel=(Quantum) 0;
      switch (type)
      {
        case ReadPixelMask:
        {
          SetPixelReadMask(image,pixel,q);
          break;
        }
        case WritePixelMask:
        {
          SetPixelWriteMask(image,pixel,q);
          break;
        }
        default:
        {
          SetPixelCompositeMask(image,pixel,q);
          break;
        }
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image->mask_trait=UndefinedPixelTrait;
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e V i r t u a l P i x e l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
%  image and returns the previous setting.  A virtual pixel is any pixel access
%  that is outside the boundaries of the image cache.
%
%  The format of the SetImageVirtualPixelMethod() method is:
%
%      VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
%        const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: choose the type of virtual pixel.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(SetPixelCacheVirtualMethod(image,virtual_pixel_method,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S m u s h I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SmushImages() takes all images from the current image pointer to the end
%  of the image list and smushes them to each other top-to-bottom if the
%  stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting now effects how the image is justified in the
%  final image.
%
%  The format of the SmushImages method is:
%
%      Image *SmushImages(const Image *images,const MagickBooleanType stack,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o stack: A value other than 0 stacks the images top-to-bottom.
%
%    o offset: minimum distance in pixels between images.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  SmushXGap(): scan the transparent borders of the current image and its
  predecessor to find how far the two can overlap horizontally; returns the
  gap (in pixels) reduced by the requested offset.
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *left_view,
    *right_view;

  const Image
    *left_image,
    *right_image;

  RectangleInfo
    left_geometry,
    right_geometry;

  register const Quantum
    *p;

  register ssize_t
    i,
    y;

  size_t
    gap;

  ssize_t
    x;

  if (images->previous == (Image *) NULL)
    return(0);
  right_image=images;
  SetGeometry(smush_image,&right_geometry);
  GravityAdjustGeometry(right_image->columns,right_image->rows,
    right_image->gravity,&right_geometry);
  left_image=images->previous;
  SetGeometry(smush_image,&left_geometry);
  GravityAdjustGeometry(left_image->columns,left_image->rows,
    left_image->gravity,&left_geometry);
  gap=right_image->columns;
  left_view=AcquireVirtualCacheView(left_image,exception);
  right_view=AcquireVirtualCacheView(right_image,exception);
  for (y=0; y < (ssize_t) smush_image->rows; y++)
  {
    /* measure the run of transparent pixels at the left image's right edge */
    for (x=(ssize_t) left_image->columns-1; x > 0; x--)
    {
      p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(left_image,p) != TransparentAlpha) ||
          ((left_image->columns-x-1) >= gap))
        break;
    }
    i=(ssize_t) left_image->columns-x-1;
    /* measure the run of transparent pixels at the right image's left edge */
    for (x=0; x < (ssize_t) right_image->columns; x++)
    {
      p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(right_image,p) != TransparentAlpha) ||
          ((x+i) >= (ssize_t) gap))
        break;
    }
    if ((x+i) < (ssize_t) gap)
      gap=(size_t) (x+i);
  }
  right_view=DestroyCacheView(right_view);
  left_view=DestroyCacheView(left_view);
  if (y < (ssize_t) smush_image->rows)
    return(offset);
  return((ssize_t) gap-offset);
}

/*
  SmushYGap(): vertical analogue of SmushXGap(); returns the overlap gap
  between the current image and its predecessor, reduced by offset.
*/
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *bottom_view,
    *top_view;

  const Image
    *bottom_image,
    *top_image;

  RectangleInfo
    bottom_geometry,
    top_geometry;

  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  size_t
    gap;

  ssize_t
    y;

  if (images->previous == (Image *) NULL)
    return(0);
  bottom_image=images;
  SetGeometry(smush_image,&bottom_geometry);
  GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
    bottom_image->gravity,&bottom_geometry);
  top_image=images->previous;
  SetGeometry(smush_image,&top_geometry);
  GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
    &top_geometry);
  gap=bottom_image->rows;
  top_view=AcquireVirtualCacheView(top_image,exception);
  bottom_view=AcquireVirtualCacheView(bottom_image,exception);
  for (x=0; x < (ssize_t) smush_image->columns; x++)
  {
    /* measure transparent run at the top image's bottom edge */
    for (y=(ssize_t) top_image->rows-1; y > 0; y--)
    {
      p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(top_image,p) != TransparentAlpha) ||
          ((top_image->rows-y-1) >= gap))
        break;
    }
    i=(ssize_t) top_image->rows-y-1;
    /* measure transparent run at the bottom image's top edge */
    for (y=0; y < (ssize_t) bottom_image->rows; y++)
    {
      p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(bottom_image,p) != TransparentAlpha) ||
          ((y+i) >= (ssize_t) gap))
        break;
    }
    if ((y+i) < (ssize_t) gap)
      gap=(size_t) (y+i);
  }
  bottom_view=DestroyCacheView(bottom_view);
  top_view=DestroyCacheView(top_view);
  if (x < (ssize_t) smush_image->columns)
    return(offset);
  return((ssize_t) gap-offset);
}

MagickExport Image *SmushImages(const Image *images,
  const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag  "Smush/Image"

  const Image
    *image;

  Image
    *smush_image;

  MagickBooleanType
    proceed,
    status;

  MagickOffsetType
    n;

  PixelTrait
    alpha_trait;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y_offset;

  /*
    Compute maximum area of smushed area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  alpha_trait=image->alpha_trait;
  number_images=1;
  width=image->columns;
  height=image->rows;
  next=GetNextImageInList(image);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->alpha_trait != UndefinedPixelTrait)
      alpha_trait=BlendPixelTrait;
    number_images++;
    if (stack != MagickFalse)
      {
        /* stacking: width is the widest image, heights accumulate */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        if (next->previous != (Image *) NULL)
          height+=offset;
        continue;
      }
    /* side-by-side: widths accumulate, height is the tallest image */
    width+=next->columns;
    if (next->previous != (Image *) NULL)
      width+=offset;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Smush images.
  */
  smush_image=CloneImage(image,width,height,MagickTrue,exception);
  if (smush_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(smush_image,DirectClass,exception) == MagickFalse)
    {
      smush_image=DestroyImage(smush_image);
      return((Image *) NULL);
    }
  smush_image->alpha_trait=alpha_trait;
  (void) SetImageBackgroundColor(smush_image,exception);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    SetGeometry(smush_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    if (stack != MagickFalse)
      {
        x_offset-=geometry.x;
        /* pull the image up by the measured transparent overlap */
        y_offset-=SmushYGap(smush_image,image,offset,exception);
      }
    else
      {
        x_offset-=SmushXGap(smush_image,image,offset,exception);
        y_offset-=geometry.y;
      }
    status=CompositeImage(smush_image,image,OverCompositeOp,MagickTrue,x_offset,
      y_offset,exception);
    proceed=SetImageProgress(image,SmushImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  /* trim the canvas to the actual extent used by the composites */
  if (stack == MagickFalse)
    smush_image->columns=(size_t) x_offset;
  else
    smush_image->rows=(size_t) y_offset;
  if (status == MagickFalse)
    smush_image=DestroyImage(smush_image);
  return(smush_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S t r i p I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  StripImage() strips an image of all profiles and comments.
%
%  The format of the StripImage method is:
%
%      MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) exception;  /* exception is currently unused */
  DestroyImageProfiles(image);
  (void) DeleteImageProperty(image,"comment");
  (void) DeleteImageProperty(image,"date:create");
  (void) DeleteImageProperty(image,"date:modify");
  /* also tell the PNG encoder to omit ancillary metadata chunks */
  status=SetImageArtifact(image,"png:exclude-chunk",
    "bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImage() initializes the red, green, and blue intensities of each pixel
%  as defined by the colormap index.
%
%  The format of the SyncImage method is:
%
%      MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */

/*
  PushColormapIndex(): validate a colormap index; out-of-range indexes are
  clamped to 0 and flagged via *range_exception.
*/
static inline Quantum PushColormapIndex(Image *image,const Quantum index,
  MagickBooleanType *range_exception)
{
  if ((size_t) index < image->colors)
    return(index);
  *range_exception=MagickTrue;
  return((Quantum) 0);
}

MagickExport MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    range_exception,
    status,
    taint;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->ping != MagickFalse)
    return(MagickTrue);
  if (image->storage_class != PseudoClass)
    return(MagickFalse);
  assert(image->colormap != (PixelInfo *) NULL);
  range_exception=MagickFalse;
  status=MagickTrue;
  /* remember the taint flag: syncing from the colormap is not a user edit */
  taint=image->taint;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(range_exception,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      index;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=PushColormapIndex(image,GetPixelIndex(image,q),&range_exception);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  image->taint=taint;
  if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
    (void) ThrowMagickException(exception,GetMagickModule(),
      CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c I m a g e   S e
t t i n g s                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImageSettings() syncs any image_info global options into per-image
%  attributes.
%
%  Note: in IMv6 free form 'options' were always mapped into 'artifacts', so
%  that operations and coders can find such settings.  In IMv7 if a desired
%  per-image artifact is not set, then it will directly look for a global
%  option as a fallback, as such this copy is no longer needed, only the
%  link set up.
%
%  The format of the SyncImageSettings method is:
%
%      MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
%        Image *image,ExceptionInfo *exception)
%      MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images,ExceptionInfo *exception)
{
  Image
    *image;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  image=images;
  /* apply the global options to every image in the list */
  for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
    (void) SyncImageSettings(image_info,image,exception);
  /* "page" applies only once per sequence, so drop it afterwards */
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}

MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const char
    *option;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  ResolutionType
    units;

  /*
    Sync image options.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  option=GetImageOption(image_info,"background");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->background_color,
      exception);
  option=GetImageOption(image_info,"black-point-compensation");
  if (option != (const char *) NULL)
    image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
      MagickBooleanOptions,MagickFalse,option);
  option=GetImageOption(image_info,"blue-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.blue_primary.x=geometry_info.rho;
      image->chromaticity.blue_primary.y=geometry_info.sigma;
      /* a single value applies to both x and y */
      if ((flags & SigmaValue) == 0)
        image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
    }
  option=GetImageOption(image_info,"bordercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->border_color,
      exception);
  /* FUTURE: do not sync compose to per-image compose setting here */
  option=GetImageOption(image_info,"compose");
  if (option != (const char *) NULL)
    image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
      MagickFalse,option);
  /* -- */
  option=GetImageOption(image_info,"compress");
  if (option != (const char *) NULL)
    image->compression=(CompressionType) ParseCommandOption(
      MagickCompressOptions,MagickFalse,option);
  option=GetImageOption(image_info,"debug");
  if (option != (const char *) NULL)
    image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"density");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->resolution.x=geometry_info.rho;
      image->resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->resolution.y=image->resolution.x;
    }
  option=GetImageOption(image_info,"depth");
  if (option != (const char *) NULL)
    image->depth=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"endian");
  if (option != (const char *) NULL)
    image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"filter");
  if (option != (const char *) NULL)
    image->filter=(FilterType) ParseCommandOption(MagickFilterOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"fuzz");
  if (option != (const char *) NULL)
    image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"gravity");
  if (option != (const char *) NULL)
    image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"green-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.green_primary.x=geometry_info.rho;
      image->chromaticity.green_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
    }
  option=GetImageOption(image_info,"intent");
  if (option != (const char *) NULL)
    image->rendering_intent=(RenderingIntent) ParseCommandOption(
      MagickIntentOptions,MagickFalse,option);
  option=GetImageOption(image_info,"intensity");
  if (option != (const char *) NULL)
    image->intensity=(PixelIntensityMethod) ParseCommandOption(
      MagickPixelIntensityOptions,MagickFalse,option);
  option=GetImageOption(image_info,"interlace");
  if (option != (const char *) NULL)
    image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"interpolate");
  if (option != (const char *) NULL)
    image->interpolate=(PixelInterpolateMethod) ParseCommandOption(
      MagickInterpolateOptions,MagickFalse,option);
  option=GetImageOption(image_info,"loop");
  if (option != (const char *) NULL)
    image->iterations=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"mattecolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->matte_color,
      exception);
  option=GetImageOption(image_info,"orient");
  if (option != (const char *) NULL)
    image->orientation=(OrientationType) ParseCommandOption(
      MagickOrientationOptions,MagickFalse,option);
  option=GetImageOption(image_info,"page");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"quality");
  if (option != (const char *) NULL)
    image->quality=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"red-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.red_primary.x=geometry_info.rho;
      image->chromaticity.red_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
    }
  /* an explicit image_info quality overrides the "quality" option */
  if (image_info->quality != UndefinedCompressionQuality)
    image->quality=image_info->quality;
  option=GetImageOption(image_info,"scene");
  if (option != (const char *) NULL)
    image->scene=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"taint");
  if (option != (const char *) NULL)
    image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"tile-offset");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"transparent-color");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->transparent_color,
      exception);
  option=GetImageOption(image_info,"type");
  if (option != (const char *) NULL)
    image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
      option);
  option=GetImageOption(image_info,"units");
  units=image_info->units;
  if (option != (const char *) NULL)
    units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
      MagickFalse,option);
  if (units != UndefinedResolution)
    {
      /* convert the stored resolution when the unit system changes */
      if (image->units != units)
        switch (image->units)
        {
          case PixelsPerInchResolution:
          {
            if (units == PixelsPerCentimeterResolution)
              {
                image->resolution.x/=2.54;
                image->resolution.y/=2.54;
              }
            break;
          }
          case PixelsPerCentimeterResolution:
          {
            if (units == PixelsPerInchResolution)
              {
                /* round to 2 decimal places while converting cm -> inch */
                image->resolution.x=(double) ((size_t) (100.0*2.54*
                  image->resolution.x+0.5))/100.0;
                image->resolution.y=(double) ((size_t) (100.0*2.54*
                  image->resolution.y+0.5))/100.0;
              }
            break;
          }
          default:
            break;
        }
      image->units=units;
      /* re-apply "density" so it is interpreted in the new units */
      option=GetImageOption(image_info,"density");
      if (option != (const char *) NULL)
        {
          flags=ParseGeometry(option,&geometry_info);
          image->resolution.x=geometry_info.rho;
          image->resolution.y=geometry_info.sigma;
          if ((flags & SigmaValue) == 0)
            image->resolution.y=image->resolution.x;
        }
    }
  option=GetImageOption(image_info,"virtual-pixel");
  if (option != (const char *) NULL)
    (void) SetImageVirtualPixelMethod(image,(VirtualPixelMethod)
      ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,option),
      exception);
  option=GetImageOption(image_info,"white-point");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.white_point.x=geometry_info.rho;
      image->chromaticity.white_point.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.white_point.y=image->chromaticity.white_point.x;
    }
  /*
    Pointer to allow the lookup of pre-image artifact will fallback to a
    global option setting/define.  This saves a lot of duplication of
    global options into per-image artifacts, while ensuring only
    specifically set per-image artifacts are preserved when parenthesis
    ends.
  */
  if (image->image_info != (ImageInfo *) NULL)
    image->image_info=DestroyImageInfo(image->image_info);
  image->image_info=CloneImageInfo(image_info);
  return(MagickTrue);
}
sparse_matrix_multiplication_utility.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_SPARSE_MATRIX_MULTIPLICATION_UTILITY_H_INCLUDED ) #define KRATOS_SPARSE_MATRIX_MULTIPLICATION_UTILITY_H_INCLUDED // System includes #include <vector> #include <math.h> #include <algorithm> #include <numeric> #ifdef _OPENMP #include <omp.h> #endif // External includes #include "amgcl/value_type/interface.hpp" // Project includes #include "includes/define.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class SparseMatrixMultiplicationUtility * @ingroup ContactStructuralMechanicsApplication * @brief An utility to multiply sparse matrix in Ublas * @details Taken and adapted for ublas from external_libraries/amgcl/detail/spgemm.hpp by Denis Demidov <dennis.demidov@gmail.com> * @todo Remove as soon as we do not depend of Ublas anymore... 
* @author Vicente Mataix Ferrandiz */ class SparseMatrixMultiplicationUtility { public: ///@name Type Definitions ///@{ /// Pointer definition of TreeContactSearch KRATOS_CLASS_POINTER_DEFINITION( SparseMatrixMultiplicationUtility ); /// The size type typedef std::size_t SizeType; /// The index type typedef std::size_t IndexType; /// The signed index type typedef std::ptrdiff_t SignedIndexType; /// A vector of indexes typedef DenseVector<IndexType> IndexVectorType; /// A vector of indexes (signed) typedef DenseVector<SignedIndexType> SignedIndexVectorType; ///@} ///@name Life Cycle ///@{ /// Default constructor SparseMatrixMultiplicationUtility(){}; /// Desctructor virtual ~SparseMatrixMultiplicationUtility()= default;; ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /// Metafunction that returns value type of a matrix or a vector type. template <class T, class Enable = void> struct value_type { typedef typename T::value_type type; }; /** * @brief Matrix-matrix product C = A·B * @detail This method uses a template for each matrix * @param rA The first matrix * @param rB The second matrix * @param rC The resulting matrix */ template <class AMatrix, class BMatrix, class CMatrix> static void MatrixMultiplication( const AMatrix& rA, const BMatrix& rB, CMatrix& rC ) { #ifdef _OPENMP const int nt = omp_get_max_threads(); #else const int nt = 1; #endif if (nt > 16) { MatrixMultiplicationRMerge(rA, rB, rC); } else { MatrixMultiplicationSaad(rA, rB, rC); } } /** * @brief The first is an OpenMP-enabled modification of classic algorithm from Saad * @details It is used whenever number of OpenMP cores is 4 or less. Saad, Yousef. Iterative methods for sparse linear systems. Siam, 2003. 
* @param A The first matrix to multiply * @param B The second matrix to multiply * @param C The resulting matrix */ template <class AMatrix, class BMatrix, class CMatrix> static void MatrixMultiplicationSaad( const AMatrix& A, const BMatrix& B, CMatrix& C ) { typedef typename value_type<CMatrix>::type ValueType; // Auxiliar sizes const SizeType nrows = A.size1(); const SizeType ncols = B.size2(); // Exiting just in case of empty matrix if ((nrows == 0) || (ncols == 0)) return void(); // Get access to A, B and C data const IndexType* index1_a = A.index1_data().begin(); const IndexType* index2_a = A.index2_data().begin(); const double* values_a = A.value_data().begin(); const IndexType* index1_b = B.index1_data().begin(); const IndexType* index2_b = B.index2_data().begin(); const double* values_b = B.value_data().begin(); IndexType* c_ptr = new IndexType[nrows + 1]; c_ptr[0] = 0; #pragma omp parallel { SignedIndexVectorType marker(ncols); for (int i_fill = 0; i_fill < static_cast<int>(ncols); ++i_fill) marker[i_fill] = -1; #pragma omp for for(int ia = 0; ia < static_cast<int>(nrows); ++ia) { const IndexType row_begin_a = index1_a[ia]; const IndexType row_end_a = index1_a[ia+1]; IndexType C_cols = 0; for(IndexType ja = row_begin_a; ja < row_end_a; ++ja) { const IndexType ca = index2_a[ja]; const IndexType row_begin_b = index1_b[ca]; const IndexType row_end_b = index1_b[ca+1]; for(IndexType jb = row_begin_b; jb < row_end_b; ++jb) { const IndexType cb = index2_b[jb]; if (marker[cb] != ia) { marker[cb] = ia; ++C_cols; } } } c_ptr[ia + 1] = C_cols; } } // We initialize the sparse matrix std::partial_sum(c_ptr, c_ptr + nrows + 1, c_ptr); const SizeType nonzero_values = c_ptr[nrows]; IndexType* aux_index2_c = new IndexType[nonzero_values]; ValueType* aux_val_c = new ValueType[nonzero_values]; #pragma omp parallel { SignedIndexVectorType marker(ncols); for (int i_fill = 0; i_fill < static_cast<int>(ncols); ++i_fill) marker[i_fill] = -1; #pragma omp for for(int ia = 0; ia < 
static_cast<int>(nrows); ++ia) { const IndexType row_begin_a = index1_a[ia]; const IndexType row_end_a = index1_a[ia+1]; const IndexType row_beg = c_ptr[ia]; IndexType row_end = row_beg; for(IndexType ja = row_begin_a; ja < row_end_a; ++ja) { const IndexType ca = index2_a[ja]; const ValueType va = values_a[ja]; const IndexType row_begin_b = index1_b[ca]; const IndexType row_end_b = index1_b[ca+1]; for(IndexType jb = row_begin_b; jb < row_end_b; ++jb) { const IndexType cb = index2_b[jb]; const ValueType vb = values_b[jb]; if (marker[cb] < static_cast<SignedIndexType>(row_beg)) { marker[cb] = row_end; aux_index2_c[row_end] = cb; aux_val_c[row_end] = va * vb; ++row_end; } else { aux_val_c[marker[cb]] += va * vb; } } } } } // We reorder the rows SortRows(c_ptr, nrows, ncols, aux_index2_c, aux_val_c); // We fill the matrix CreateSolutionMatrix(C, nrows, ncols, c_ptr, aux_index2_c, aux_val_c); // Release memory delete[] c_ptr; delete[] aux_index2_c; delete[] aux_val_c; } /** * @brief Row-merge algorithm from Rupp et al. * @details The algorithm requires less memory and shows much better scalability than classic one. It is used when number of OpenMP cores is more than 4. 
* @param A The first matrix to multiply * @param B The second matrix to multiply * @param C The resulting matrix */ template <class AMatrix, class BMatrix, class CMatrix> static void MatrixMultiplicationRMerge( const AMatrix &A, const BMatrix &B, CMatrix &C ) { typedef typename value_type<CMatrix>::type ValueType; // Auxiliar sizes const SizeType nrows = A.size1(); const SizeType ncols = B.size2(); // Exiting just in case of empty matrix if ((nrows == 0) || (ncols == 0)) return void(); // Get access to A and B data const IndexType* index1_a = A.index1_data().begin(); const IndexType* index2_a = A.index2_data().begin(); const double* values_a = A.value_data().begin(); const IndexType* index1_b = B.index1_data().begin(); const IndexType* index2_b = B.index2_data().begin(); const double* values_b = B.value_data().begin(); IndexType max_row_width = 0; #pragma omp parallel { IndexType my_max = 0; #pragma omp for for(int i = 0; i < static_cast<int>(nrows); ++i) { const IndexType row_beg = index1_a[i]; const IndexType row_end = index1_a[i+1]; IndexType row_width = 0; for(IndexType j = row_beg; j < row_end; ++j) { const IndexType a_col = index2_a[j]; row_width += index1_b[a_col + 1] - index1_b[a_col]; } my_max = std::max(my_max, row_width); } #pragma omp critical max_row_width = std::max(max_row_width, my_max); } #ifdef _OPENMP const int nthreads = omp_get_max_threads(); #else const int nthreads = 1; #endif std::vector< std::vector<IndexType> > tmp_col(nthreads); std::vector< std::vector<ValueType> > tmp_val(nthreads); for(int i = 0; i < nthreads; ++i) { tmp_col[i].resize(3 * max_row_width); tmp_val[i].resize(2 * max_row_width); } // We create the c_ptr auxiliar variable IndexType* c_ptr = new IndexType[nrows + 1]; c_ptr[0] = 0; #pragma omp parallel { #ifdef _OPENMP const int tid = omp_get_thread_num(); #else const int tid = 0; #endif IndexType* t_col = &tmp_col[tid][0]; #pragma omp for for(int i = 0; i < static_cast<int>(nrows); ++i) { const IndexType row_beg = 
index1_a[i]; const IndexType row_end = index1_a[i+1]; c_ptr[i+1] = ProdRowWidth( index2_a + row_beg, index2_a + row_end, index1_b, index2_b, t_col, t_col + max_row_width, t_col + 2 * max_row_width ); } } // We initialize the sparse matrix std::partial_sum(c_ptr, c_ptr + nrows + 1, c_ptr); const SizeType nonzero_values = c_ptr[nrows]; IndexType* aux_index2_c = new IndexType[nonzero_values]; ValueType* aux_val_c = new ValueType[nonzero_values]; #pragma omp parallel { #ifdef _OPENMP const int tid = omp_get_thread_num(); #else const int tid = 0; #endif IndexType* t_col = tmp_col[tid].data(); ValueType *t_val = tmp_val[tid].data(); #pragma omp for for(int i = 0; i < static_cast<int>(nrows); ++i) { const IndexType row_beg = index1_a[i]; const IndexType row_end = index1_a[i+1]; ProdRow(index2_a + row_beg, index2_a + row_end, values_a + row_beg, index1_b, index2_b, values_b, aux_index2_c + c_ptr[i], aux_val_c + c_ptr[i], t_col, t_val, t_col + max_row_width, t_val + max_row_width ); } } // We fill the matrix CreateSolutionMatrix(C, nrows, ncols, c_ptr, aux_index2_c, aux_val_c); // Release memory delete[] c_ptr; delete[] aux_index2_c; delete[] aux_val_c; } /** * @brief The first is a method in order to sum to sparse matrices in a efficient way * @param A The resulting matrix * @param B The second matrix to sum */ template <class AMatrix, class BMatrix> static void MatrixAdd( AMatrix& A, const BMatrix& B, const double Factor = 1.0 ) { typedef typename value_type<AMatrix>::type ValueType; // Auxiliar sizes const SizeType nrows = A.size1(); const SizeType ncols = A.size2(); /* Some checks */ // Exiting just in case of empty matrix if ((nrows == 0) || (ncols == 0)) return void(); KRATOS_ERROR_IF_NOT(nrows == B.size1()) << "The second matrix has a wrong number of rows" << std::endl; KRATOS_ERROR_IF_NOT(ncols == B.size2()) << "The second matrix has a wrong number of columns" << std::endl; // Get access to A and B data const IndexType* index1_a = A.index1_data().begin(); const 
IndexType* index2_a = A.index2_data().begin(); const double* values_a = A.value_data().begin(); const IndexType* index1_b = B.index1_data().begin(); const IndexType* index2_b = B.index2_data().begin(); const double* values_b = B.value_data().begin(); IndexType* new_a_ptr = new IndexType[nrows + 1]; new_a_ptr[0] = 0; #pragma omp parallel { #pragma omp for for(int ia = 0; ia < static_cast<int>(nrows); ++ia) { SignedIndexVectorType marker(ncols); for (int i = 0; i < static_cast<int>(ncols); ++i) marker[i] = -1; // Initialize IndexType new_A_cols = 0; // Iterate over A const IndexType row_begin_a = index1_a[ia]; const IndexType row_end_a = index1_a[ia+1]; for(IndexType ja = row_begin_a; ja < row_end_a; ++ja) { const IndexType ca = index2_a[ja]; marker[ca] = 1; ++new_A_cols; } // Iterate over B const IndexType row_begin_b = index1_b[ia]; const IndexType row_end_b = index1_b[ia+1]; for(IndexType jb = row_begin_b; jb < row_end_b; ++jb) { const IndexType cb = index2_b[jb]; if (marker[cb] < 0) { marker[cb] = 1; ++new_A_cols; } } new_a_ptr[ia + 1] = new_A_cols; } } // We initialize the sparse matrix std::partial_sum(new_a_ptr, new_a_ptr + nrows + 1, new_a_ptr); const SizeType nonzero_values = new_a_ptr[nrows]; IndexType* aux_index2_new_a = new IndexType[nonzero_values]; ValueType* aux_val_new_a = new ValueType[nonzero_values]; #pragma omp parallel { #pragma omp for for(int ia = 0; ia < static_cast<int>(nrows); ++ia) { SignedIndexVectorType marker(ncols); for (int i = 0; i < static_cast<int>(ncols); ++i) marker[i] = -1; // Initialize const IndexType row_beg = new_a_ptr[ia]; IndexType row_end = row_beg; // Iterate over A const IndexType row_begin_a = index1_a[ia]; const IndexType row_end_a = index1_a[ia+1]; for(IndexType ja = row_begin_a; ja < row_end_a; ++ja) { const IndexType ca = index2_a[ja]; const ValueType va = values_a[ja]; marker[ca] = row_end; aux_index2_new_a[row_end] = ca; aux_val_new_a[row_end] = va; ++row_end; } // Iterate over B const IndexType row_begin_b = 
index1_b[ia]; const IndexType row_end_b = index1_b[ia+1]; for(IndexType jb = row_begin_b; jb < row_end_b; ++jb) { const IndexType cb = index2_b[jb]; const ValueType vb = values_b[jb]; if (marker[cb] < 0) { marker[cb] = row_end; aux_index2_new_a[row_end] = cb; aux_val_new_a[row_end] = Factor * vb; ++row_end; } else { aux_val_new_a[marker[cb]] += Factor * vb; } } } } // We reorder the rows SortRows(new_a_ptr, nrows, ncols, aux_index2_new_a, aux_val_new_a); // We fill the matrix CreateSolutionMatrix(A, nrows, ncols, new_a_ptr, aux_index2_new_a, aux_val_new_a); // Release memory delete[] new_a_ptr; delete[] aux_index2_new_a; delete[] aux_val_new_a; } /** * @brief This method computes of the transpose matrix of a given matrix * @param rA The resulting matrix * @param rB The second matrix to transpose */ template <class AMatrix, class BMatrix> static void TransposeMatrix( AMatrix& rA, const BMatrix& rB, const double Factor = 1.0 ) { typedef typename value_type<AMatrix>::type ValueType; // Get access to B data const IndexType* index1 = rB.index1_data().begin(); const IndexType* index2 = rB.index2_data().begin(); const ValueType* data = rB.value_data().begin(); const SizeType transpose_nonzero_values = rB.value_data().end() - rB.value_data().begin(); const SizeType size_system_1 = rB.size1(); const SizeType size_system_2 = rB.size2(); if (rA.size1() != size_system_2 || rA.size2() != size_system_1 ) { rA.resize(size_system_2, size_system_1, false); } IndexVectorType new_a_ptr(size_system_2 + 1); #pragma omp parallel for for (int i = 0; i < static_cast<int>(size_system_2 + 1); ++i) new_a_ptr[i] = 0; IndexVectorType aux_index2_new_a(transpose_nonzero_values); DenseVector<ValueType> aux_val_new_a(transpose_nonzero_values); #pragma omp parallel for for (int i=0; i<static_cast<int>(size_system_1); ++i) { IndexType row_begin = index1[i]; IndexType row_end = index1[i+1]; for (IndexType j=row_begin; j<row_end; j++) { #pragma omp atomic new_a_ptr[index2[j] + 1] += 1; } } // We 
initialize the blocks sparse matrix std::partial_sum(new_a_ptr.begin(), new_a_ptr.end(), &new_a_ptr[0]); IndexVectorType aux_indexes(size_system_2); #pragma omp parallel for for (int i = 0; i < static_cast<int>(size_system_2); ++i) aux_indexes[i] = 0; // #pragma omp parallel for for (int i=0; i<static_cast<int>(size_system_1); ++i) { IndexType row_begin = index1[i]; IndexType row_end = index1[i+1]; for (IndexType j=row_begin; j<row_end; j++) { const IndexType current_row = index2[j]; const IndexType initial_position = new_a_ptr[current_row]; const IndexType current_index = initial_position + aux_indexes[current_row]; aux_index2_new_a[current_index] = i; aux_val_new_a[current_index] = Factor * data[j]; // #pragma omp atomic aux_indexes[current_row] += 1; } } // We reorder the rows SortRows(&new_a_ptr[0], size_system_2, size_system_1, &aux_index2_new_a[0], &aux_val_new_a[0]); // We fill the matrix CreateSolutionMatrix(rA, size_system_2, size_system_1, &new_a_ptr[0], &aux_index2_new_a[0], &aux_val_new_a[0]); } /** * @brief This method is designed to create the final solution sparse matrix from the auxiliar values * @param C The matrix solution * @param NRows The number of rows of the matrix * @param NCols The number of columns of the matrix * @param CPtr The indexes taht indicate the number of nonzero values in each column * @param AuxIndex2C The indexes of the nonzero columns * @param AuxValC The C array containing the values of the sparse matrix */ template <class CMatrix, typename TSize, typename Ptr, typename IndexType, typename ValueType> static inline void CreateSolutionMatrix( CMatrix& C, const TSize NRows, const TSize NCols, const Ptr* CPtr, const IndexType* AuxIndex2C, const ValueType* AuxValC ) { // Exiting just in case of empty matrix if ((NRows == 0) || (NCols == 0)) return void(); // Auxiliar values const TSize nonzero_values = CPtr[NRows]; C = CMatrix(NRows, NCols, nonzero_values); IndexType* index1_c = C.index1_data().begin(); IndexType* index2_c = 
C.index2_data().begin(); double* values_c = C.value_data().begin(); index1_c[0] = 0; for (TSize i = 0; i < NRows; i++) index1_c[i+1] = index1_c[i] + (CPtr[i+1] - CPtr[i]); #pragma omp parallel for for (int i = 0; i < static_cast<int>(nonzero_values); i++) { KRATOS_DEBUG_ERROR_IF(AuxIndex2C[i] > static_cast<IndexType>(NCols)) << "Index " << AuxIndex2C[i] <<" is greater than the number of columns " << NCols << std::endl; index2_c[i] = AuxIndex2C[i]; values_c[i] = AuxValC[i]; } C.set_filled(NRows+1, nonzero_values); } /** * @brief This method is designed to reorder the rows by columns * @param NRows The number of rows of the matrix * @param NCols The number of columns of the matrix * @param CPtr The indexes taht indicate the number of nonzero values in each column * @param Columns The columns of the problem * @param Values The values (to be ordered with the rows) */ template <typename TSize, typename Col, typename TIndexType, typename ValueType> static inline void SortRows( const TIndexType* CPtr, const TSize NRows, const TSize NCols, Col* Columns, ValueType* Values ) { #pragma omp parallel { #pragma omp for for (int i_row=0; i_row<static_cast<int>(NRows); i_row++) { const TIndexType row_beg = CPtr[i_row]; const TIndexType row_end = CPtr[i_row + 1]; for(IndexType j = 1; j < row_end - row_beg; ++j) { const IndexType c = Columns[j + row_beg]; const double v = Values[j + row_beg]; SignedIndexType i = j - 1; while(i >= 0 && Columns[i + row_beg] > c) { KRATOS_DEBUG_ERROR_IF(Columns[i + row_beg] > static_cast<Col>(NCols)) << " Index for column: " << i + row_beg << ". Index " << Columns[i + row_beg] <<" is greater than the number of columns " << NCols << std::endl; Columns[i + 1 + row_beg] = Columns[i + row_beg]; Values[i + 1 + row_beg] = Values[i + row_beg]; i--; } Columns[i + 1 + row_beg] = c; Values[i + 1 + row_beg] = v; } } } } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. 
std::string Info() const { return "SparseMatrixMultiplicationUtility"; } /// Print information about this object. void PrintInfo (std::ostream& rOStream) const { rOStream << "SparseMatrixMultiplicationUtility"; } /// Print object's data. void PrintData (std::ostream& rOStream) const { } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ /** * @brief This method is oriented to merge rows * @param Column1 The index of the first matrix column * @param Column1End The last index of the first matrix column * @param Column2 The index of the second matrix column * @param Column2End The last index of the second matrix column * @param Column3 The index of the third matrix column * @return The resulting row */ template <bool TNeedOut, class TIndex> static TIndex* MergeRows( const TIndex* Column1, const TIndex* Column1End, const TIndex* Column2, const TIndex* Column2End, TIndex* Column3 ) { while(Column1 != Column1End && Column2 != Column2End) { TIndex c1 = *Column1; TIndex c2 = *Column2; if (c1 < c2) { if (TNeedOut) *Column3 = c1; ++Column1; } else if (c1 == c2) { if (TNeedOut) *Column3 = c1; ++Column1; ++Column2; } else { if (TNeedOut) *Column3 = c2; ++Column2; } ++Column3; } if (TNeedOut) { if (Column1 < Column1End) { return std::copy(Column1, Column1End, Column3); } else if (Column2 < Column2End) { return std::copy(Column2, Column2End, Column3); } else { return Column3; } } else { return Column3 + (Column1End - Column1) + (Column2End - Column2); } } /** * @brief This method is oriented to merge rows * @param rAlpha1 
The coefficient of the first matrix * @param Column1 The index of the first matrix column * @param Column1End The last index of the first matrix column * @param Value1 The values of the first matrix * @param rAlpha2 The coefficient of the second matrix * @param Column2 The index of the second matrix column * @param Column2End The last index of the second matrix column * @param Value2 The values of the second matrix * @param Column3 The index of the third matrix column * @param Value3 The values of the third matrix * @return The resulting row */ template <class TIndex, class TValueType> static TIndex* MergeRows( const TValueType &rAlpha1, const TIndex* Column1, const TIndex* Column1End, const TValueType *Value1, const TValueType &rAlpha2, const TIndex* Column2, const TIndex* Column2End, const TValueType *Value2, TIndex* Column3, TValueType *Value3 ) { while(Column1 != Column1End && Column2 != Column2End) { TIndex c1 = *Column1; TIndex c2 = *Column2; if (c1 < c2) { ++Column1; *Column3 = c1; *Value3 = rAlpha1 * (*Value1++); } else if (c1 == c2) { ++Column1; ++Column2; *Column3 = c1; *Value3 = rAlpha1 * (*Value1++) + rAlpha2 * (*Value2++); } else { ++Column2; *Column3 = c2; *Value3 = rAlpha2 * (*Value2++); } ++Column3; ++Value3; } while(Column1 < Column1End) { *Column3++ = *Column1++; *Value3++ = rAlpha1 * (*Value1++); } while(Column2 < Column2End) { *Column3++ = *Column2++; *Value3++ = rAlpha2 * (*Value2++); } return Column3; } /** * @brief This method is oriented to multiply rows * @param AColumn The index of the first matrix column * @param AColumnEnd The last index of the first matrix column * @param BPtr The array constining the nonzero values per row of the second matrix * @param BColumn The index of the second matrix column * @param Column2End The last index of the second matrix column * @param Tmp1Column Indexes of the columns of first matrix * @param Tmp2Column Indexes of the columns of second matrix * @param Tmp3Column Indexes of the columns of third matrix * 
@return The resulting row */ template <class TIndex> static TIndex ProdRowWidth( const TIndex* AColumn, const TIndex* AColumnEnd, const TIndex* BPtr, const TIndex* BColumn, TIndex* Tmp1Column, TIndex* Tmp2Column, TIndex* Tmp3Column ) { const TIndex nrow = AColumnEnd - AColumn; /* No rows to merge, nothing to do */ if (nrow == 0) return 0; /* Single row, just copy it to output */ if (nrow == 1) return BPtr[*AColumn + 1] - BPtr[*AColumn]; /* Two rows, merge them */ if (nrow == 2) { int a1 = AColumn[0]; int a2 = AColumn[1]; return MergeRows<false>( BColumn + BPtr[a1], BColumn + BPtr[a1+1], BColumn + BPtr[a2], BColumn + BPtr[a2+1], Tmp1Column) - Tmp1Column; } /* Generic case (more than two rows). * * Merge rows by pairs, then merge the results together. * When merging two rows, the result is always wider (or equal). * Merging by pairs allows to work with short rows as often as possible. */ // Merge first two. TIndex a1 = *AColumn++; TIndex a2 = *AColumn++; TIndex c_col1 = MergeRows<true>( BColumn + BPtr[a1], BColumn + BPtr[a1+1], BColumn + BPtr[a2], BColumn + BPtr[a2+1], Tmp1Column ) - Tmp1Column; // Go by pairs. while(AColumn + 1 < AColumnEnd) { a1 = *AColumn++; a2 = *AColumn++; TIndex c_col2 = MergeRows<true>( BColumn + BPtr[a1], BColumn + BPtr[a1+1], BColumn + BPtr[a2], BColumn + BPtr[a2+1], Tmp2Column ) - Tmp2Column; if (AColumn == AColumnEnd) { return MergeRows<false>( Tmp1Column, Tmp1Column + c_col1, Tmp2Column, Tmp2Column + c_col2, Tmp3Column ) - Tmp3Column; } else { c_col1 = MergeRows<true>( Tmp1Column, Tmp1Column + c_col1, Tmp2Column, Tmp2Column + c_col2, Tmp3Column ) - Tmp3Column; std::swap(Tmp1Column, Tmp3Column); } } // Merge the tail. 
a2 = *AColumn; return MergeRows<false>( Tmp1Column, Tmp1Column + c_col1, BColumn + BPtr[a2], BColumn + BPtr[a2+1], Tmp2Column ) - Tmp2Column; } /** * @brief This method is oriented to multiply rows * @param AColumn The index of the first matrix column * @param AColumnEnd The last index of the first matrix column * @param AValue The values of the first matrix * @param BPtr The array constining the nonzero values per row of the second matrix * @param BColumn The index of the second matrix column * @param BValue The values of the second matrix * @param OutColumn Indexes of the columns of output matrix * @param OutValue Values of the columns of output matrix * @param Tmp2Column Indexes of the columns of second matrix * @param Tmp2Value Values of the columns of second matrix * @param Tmp3Column Indexes of the columns of third matrix * @param Tmp3Value Values of the columns of third matrix * @return The resulting row */ template <class TIndex, class TValueType> static void ProdRow( const TIndex* AColumn, const TIndex* AColumnEnd, const TValueType *AValue, const TIndex* BPtr, const TIndex* BColumn, const TValueType *BValue, TIndex* OutColumn, TValueType *OutValue, TIndex* Tmp2Column, TValueType *Tmp2Value, TIndex* Tmp3Column, TValueType *Tmp3Value ) { const TIndex nrow = AColumnEnd - AColumn; /* No rows to merge, nothing to do */ if (nrow == 0) return; /* Single row, just copy it to output */ if (nrow == 1) { TIndex ac = *AColumn; TValueType av = *AValue; const TValueType *bv = BValue + BPtr[ac]; const TIndex* bc = BColumn + BPtr[ac]; const TIndex* be = BColumn + BPtr[ac+1]; while(bc != be) { *OutColumn++ = *bc++; *OutValue++ = av * (*bv++); } return; } /* Two rows, merge them */ if (nrow == 2) { TIndex ac1 = AColumn[0]; TIndex ac2 = AColumn[1]; TValueType av1 = AValue[0]; TValueType av2 = AValue[1]; MergeRows( av1, BColumn + BPtr[ac1], BColumn + BPtr[ac1+1], BValue + BPtr[ac1], av2, BColumn + BPtr[ac2], BColumn + BPtr[ac2+1], BValue + BPtr[ac2], OutColumn, OutValue ); } 
/* Generic case (more than two rows). * * Merge rows by pairs, then merge the results together. * When merging two rows, the result is always wider (or equal). * Merging by pairs allows to work with short rows as often as possible. */ // Merge first two. TIndex ac1 = *AColumn++; TIndex ac2 = *AColumn++; TValueType av1 = *AValue++; TValueType av2 = *AValue++; TIndex* tm1_col = OutColumn; TValueType *tm1_val = OutValue; TIndex c_col1 = MergeRows( av1, BColumn + BPtr[ac1], BColumn + BPtr[ac1+1], BValue + BPtr[ac1], av2, BColumn + BPtr[ac2], BColumn + BPtr[ac2+1], BValue + BPtr[ac2], tm1_col, tm1_val ) - tm1_col; // Go by pairs. while(AColumn + 1 < AColumnEnd) { ac1 = *AColumn++; ac2 = *AColumn++; av1 = *AValue++; av2 = *AValue++; TIndex c_col2 = MergeRows( av1, BColumn + BPtr[ac1], BColumn + BPtr[ac1+1], BValue + BPtr[ac1], av2, BColumn + BPtr[ac2], BColumn + BPtr[ac2+1], BValue + BPtr[ac2], Tmp2Column, Tmp2Value ) - Tmp2Column; c_col1 = MergeRows( amgcl::math::identity<TValueType>(), tm1_col, tm1_col + c_col1, tm1_val, amgcl::math::identity<TValueType>(), Tmp2Column, Tmp2Column + c_col2, Tmp2Value, Tmp3Column, Tmp3Value ) - Tmp3Column; std::swap(Tmp3Column, tm1_col); std::swap(Tmp3Value, tm1_val); } // Merge the tail if there is one. if (AColumn < AColumnEnd) { ac2 = *AColumn++; av2 = *AValue++; c_col1 = MergeRows( amgcl::math::identity<TValueType>(), tm1_col, tm1_col + c_col1, tm1_val, av2, BColumn + BPtr[ac2], BColumn + BPtr[ac2+1], BValue + BPtr[ac2], Tmp3Column, Tmp3Value ) - Tmp3Column; std::swap(Tmp3Column, tm1_col); std::swap(Tmp3Value, tm1_val); } // If we are lucky, tm1 now points to out. // Otherwise, copy the results. 
if (tm1_col != OutColumn) { std::copy(tm1_col, tm1_col + c_col1, OutColumn); std::copy(tm1_val, tm1_val + c_col1, OutValue); } return; } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; // Class SparseMatrixMultiplicationUtility ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ // /****************************** INPUT STREAM FUNCTION ******************************/ // /***********************************************************************************/ // // template<class TPointType, class TPointerType> // inline std::istream& operator >> (std::istream& rIStream, // SparseMatrixMultiplicationUtility& rThis); // // /***************************** OUTPUT STREAM FUNCTION ******************************/ // /***********************************************************************************/ // // template<class TPointType, class TPointerType> // inline std::ostream& operator << (std::ostream& rOStream, // const SparseMatrixMultiplicationUtility& rThis) // { // return rOStream; // } ///@} } // namespace Kratos. #endif // KRATOS_TREE_CONTACT_SEARCH_H_INCLUDED defined
lastprivate-clause.c
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
/* Stub so the program also builds without OpenMP support */
#define omp_get_thread_num() 0
#endif

/*
 * Demonstrates the OpenMP lastprivate clause: each thread gets a private
 * copy of v, and after the parallel for the value from the sequentially
 * last iteration (v = a[n-1] = 7) is copied back to the original v.
 *
 * FIX: main() relied on implicit int (invalid since C99) and had no return.
 */
int main(void)
{
    int i, n = 7;
    int a[n], v;

    for (i = 0; i < n; i++)
        a[i] = i + 1;

    #pragma omp parallel for lastprivate(v)
    for (i = 0; i < n; i++) {
        v = a[i];
        printf ("thread %d v=%d\n ", omp_get_thread_num(), v);
    }

    /* v now holds the value from the last iteration */
    printf("\nFuera de la construccion parallel for v=%d\n", v);
    return 0;
}
Solver.h
#pragma once

#include <iostream>

#include "common.h"
#include "Loss.h"
#include "Prox.h"

namespace solvers {

// Base class for linear-model solvers operating on a weight vector w of
// size nfeatures. Provides dense and sparse (CSR) prediction / loss
// evaluation and static driver templates that feed data blocks to a
// concrete solver's iterate() method (duck-typed requirement on SolverT).
// Types Double, Vector, Matrix, MatrixMap, VectorMap, SpMatrixMap come
// from common.h — presumably Eigen aliases; TODO confirm their definitions
// (in particular the storage order of Matrix, see OneVsRest::predict).
class Solver {
 public:
  // loss: name of the loss function (dispatched by Loss::computeLoss).
  // prox: name of the proximal operator ("none" disables it).
  // proxWeight: weight of the proximal penalty term.
  Solver(const size_t nfeatures,
         const std::string& loss,
         const std::string& prox = "none",
         const Double proxWeight = 0)
    : nfeatures_(nfeatures),
      w_(Vector::Zero(nfeatures_)),
      loss_(loss),
      prox_(prox),
      proxWeight_(proxWeight) {
  }

  virtual ~Solver() {
  }

  // delete copy constructors (solvers own mutable state; move-only)
  Solver(const Solver&) = delete;
  Solver& operator=(const Solver&) = delete;

  Solver(Solver&&) = default;
  Solver& operator=(Solver&&) = default;

  size_t nfeatures() const { return nfeatures_; }

  const std::string& loss() const { return loss_; }

  // Virtual so derived solvers can expose a transformed/lazily-updated
  // weight vector; w_ is declared mutable below to allow that in const
  // accessors — TODO confirm against derived classes.
  virtual Vector& w() { return w_; }

  virtual const Vector& w() const { return w_; }

  Double* wdata() { return w().data(); }

  Double computeSquaredNorm() const { return w().squaredNorm(); }

  Double computeProxPenalty() const { return Prox::computePenalty(w(), prox_); }

  // for dense data: outPreds[0..dataSize) = X * w, where XData is a
  // dataSize x nfeatures_ row-per-sample matrix.
  void predict(const size_t dataSize,
               Double* const outPreds,
               const Double* const XData) const {
    const MatrixMap Xmap(XData, dataSize, nfeatures_);
    Eigen::Map<Vector> preds(outPreds, dataSize);
    preds = Xmap * w();
  }

  // Average loss over the dense dataset (see computeLossImpl).
  Double computeLoss(const size_t dataSize,
                     const Double* const XData,
                     const Double* const yData) const {
    Vector preds(dataSize);
    predict(dataSize, preds.data(), XData);
    return computeLossImpl(preds, yData);
  }

  // Feed each row i of a dense block to solver.iterate(row, y, idx).
  template <typename SolverT>
  static void iterateBlock(SolverT& solver,
                           const size_t blockSize,
                           const Double* const XData,
                           const Double* const yData,
                           const int64_t* const idxData) {
    const MatrixMap Xmap(XData, blockSize, solver.nfeatures());
    for (size_t i = 0; i < blockSize; ++i) {
      solver.iterate(Xmap.row(i), yData[i], idxData[i]);
    }
  }

  // Like iterateBlock, but rows are selected from the full dense dataset
  // through the index array idxData (blockSize entries).
  template <typename SolverT>
  static void iterateBlockIndexed(SolverT& solver,
                                  const size_t dataSize,
                                  const Double* const XData,
                                  const Double* const yData,
                                  const size_t blockSize,
                                  const int64_t* const idxData) {
    const MatrixMap Xmap(XData, dataSize, solver.nfeatures());
    for (size_t i = 0; i < blockSize; ++i) {
      solver.iterate(Xmap.row(idxData[i]), yData[idxData[i]], idxData[i]);
    }
  }

  // Forward a length-n vector q to solver.setQ (solver-specific meaning).
  template <typename SolverT>
  static void setQ(SolverT& solver, const size_t n, const Double* const qData) {
    const VectorMap qMap(qData, n);
    solver.setQ(qMap);
  }

  // for sparse data (CSR triplet: indptr / indices / values)
  void predict(const size_t dataSize,
               Double* const outPreds,
               const size_t nnz,
               const int32_t* const Xindptr,
               const int32_t* const Xindices,
               const Double* const Xvalues) const {
    const SpMatrixMap Xmap(dataSize, nfeatures(), nnz, Xindptr, Xindices,
                           Xvalues);
    Eigen::Map<Vector> preds(outPreds, dataSize);
    preds = Xmap * w();
  }

  // Average loss over the sparse dataset.
  Double computeLoss(const size_t dataSize,
                     const size_t nnz,
                     const int32_t* const Xindptr,
                     const int32_t* const Xindices,
                     const Double* const Xvalues,
                     const Double* const yData) const {
    Vector preds(dataSize);
    predict(dataSize, preds.data(), nnz, Xindptr, Xindices, Xvalues);
    return computeLossImpl(preds, yData);
  }

  // Sparse counterpart of the dense iterateBlock above.
  template <typename SolverT>
  static void iterateBlock(SolverT& solver,
                           const size_t blockSize, // rows in this block
                           const size_t nnz,
                           const int32_t* const Xindptr,
                           const int32_t* const Xindices,
                           const Double* const Xvalues,
                           const Double* const yData,
                           const int64_t* const idxData) {
    const SpMatrixMap Xmap(blockSize, solver.nfeatures(), nnz, Xindptr,
                           Xindices, Xvalues);
    for (size_t i = 0; i < blockSize; ++i) {
      solver.iterate(Xmap.row(i), yData[i], idxData[i]);
    }
  }

  // Sparse counterpart of the dense iterateBlockIndexed above.
  template <typename SolverT>
  static void iterateBlockIndexed(SolverT& solver,
                                  const size_t dataSize,
                                  const size_t nnz,
                                  const int32_t* const Xindptr,
                                  const int32_t* const Xindices,
                                  const Double* const Xvalues,
                                  const Double* const yData,
                                  const size_t blockSize,
                                  const int64_t* const idxData) {
    const SpMatrixMap Xmap(dataSize, solver.nfeatures(), nnz, Xindptr,
                           Xindices, Xvalues);
    for (size_t i = 0; i < blockSize; ++i) {
      solver.iterate(Xmap.row(idxData[i]), yData[idxData[i]], idxData[i]);
    }
  }

  // Hand the whole sparse dataset to solver.initFromX (solver-specific).
  template <typename SolverT>
  static void initFromX(SolverT& solver,
                        const size_t dataSize,
                        const size_t nnz,
                        const int32_t* const Xindptr,
                        const int32_t* const Xindices,
                        const Double* const Xvalues) {
    const SpMatrixMap Xmap(
        dataSize, solver.nfeatures(), nnz, Xindptr, Xindices, Xvalues);
    solver.initFromX(Xmap);
  }

  // Hand the whole sparse dataset to solver.initQ (solver-specific).
  template <typename SolverT>
  static void initQ(SolverT& solver,
                    const size_t dataSize,
                    const size_t nnz,
                    const int32_t* const Xindptr,
                    const int32_t* const Xindices,
                    const Double* const Xvalues) {
    const SpMatrixMap Xmap(
        dataSize, solver.nfeatures(), nnz, Xindptr, Xindices, Xvalues);
    solver.initQ(Xmap);
  }

 private:
  // Mean of Loss::computeLoss over all samples.
  // NOTE(review): unsigned (size_t) OpenMP loop counters require
  // OpenMP 3.0 or later — confirm the build's OpenMP level.
  Double computeLossImpl(const Vector& preds, const Double* const yData) const {
    const size_t dataSize = preds.size();
    Double loss = 0;
#pragma omp parallel for reduction(+:loss)
    for (size_t i = 0; i < dataSize; ++i) {
      loss += Loss::computeLoss(loss_, preds(i), yData[i]);
    }
    return loss / preds.size();
  }

  const size_t nfeatures_;

 protected:
  mutable Vector w_;          // weight vector (mutable: see w() note above)
  const std::string loss_;    // loss function name
  const std::string prox_;    // proximal operator name ("none" = disabled)
  const Double proxWeight_;   // weight of the proximal penalty
};

// One-vs-rest multiclass wrapper: holds one binary SolverT per class and
// trains/evaluates them against the indicator target (y == class).
template <typename SolverT>
class OneVsRest {
 public:
  // Constructs nclasses solvers, each built from the same argument pack.
  template <typename... Args>
  OneVsRest(const size_t nclasses, const Args&... args)
    : nclasses_(nclasses) {
    solvers_.reserve(nclasses_);
    for (size_t i = 0; i < nclasses_; ++i) {
      solvers_.emplace_back(args...);
    }
  }

  size_t nclasses() const { return nclasses_; }

  void startDecay() {
    for (auto& solver : solvers_) {
      solver.startDecay();
    }
  }

  void decay(const Double multiplier = 0.5) {
    for (auto& solver : solvers_) {
      solver.decay(multiplier);
    }
  }

  // Train all per-class solvers on a dense block; classes run in parallel,
  // each solver seeing the binary indicator target (y[i] == c).
  void iterateBlock(const size_t blockSize,
                    const Double* const XData,
                    const int32_t* const yData,
                    const int64_t* const idxData) {
    const MatrixMap Xmap(XData, blockSize, solvers_.front().nfeatures());
#pragma omp parallel for
    for (size_t c = 0; c < nclasses_; ++c) {
      for (size_t i = 0; i < blockSize; ++i) {
        solvers_[c].iterate(
            Xmap.row(i),
            static_cast<Double>(yData[i] == static_cast<int32_t>(c)),
            idxData[i]);
      }
    }
  }

  // Same as iterateBlock, with rows selected through idxData.
  void iterateBlockIndexed(const size_t dataSize,
                           const Double* const XData,
                           const int32_t* const yData,
                           const size_t blockSize,
                           const int64_t* const idxData) {
    const MatrixMap Xmap(XData, dataSize, solvers_.front().nfeatures());
#pragma omp parallel for
    for (size_t c = 0; c < nclasses_; ++c) {
      for (size_t i = 0; i < blockSize; ++i) {
        solvers_[c].iterate(
            Xmap.row(idxData[i]),
            static_cast<Double>(yData[idxData[i]] == static_cast<int32_t>(c)),
            idxData[i]);
      }
    }
  }

  // Predict class labels: run every per-class solver's predict, then take
  // the argmax over classes for each sample (ties favor the lower class).
  // NOTE(review): solvers_[c].predict writes dataSize contiguous values
  // through preds.row(c).data(); that is only row c's storage if Matrix is
  // row-major — confirm the Matrix alias in common.h (Eigen defaults to
  // column-major).
  template <typename... Args>
  void predict(const size_t dataSize, int32_t* const out, Args... Xargs) const {
    Matrix preds(nclasses_, dataSize);
#pragma omp parallel for
    for (size_t c = 0; c < nclasses_; ++c) {
      solvers_[c].predict(dataSize, preds.row(c).data(), Xargs...);
    }
#pragma omp parallel for
    for (size_t i = 0; i < dataSize; ++i) {
      out[i] = 0;
      Double m = preds(0, i);
      for (size_t c = 1; c < nclasses_; ++c) {
        if (preds(c, i) > m) {
          m = preds(c, i);
          out[i] = c;
        }
      }
    }
  }

  // Mean multiclass (sum-over-classes) loss on a dense dataset.
  // NOTE(review): same row-major assumption as predict() above.
  Double computeLoss(const size_t dataSize,
                     const Double* const XData,
                     const int32_t* const yData) const {
    Matrix preds(nclasses_, dataSize);
#pragma omp parallel for
    for (size_t c = 0; c < nclasses_; ++c) {
      solvers_[c].predict(dataSize, preds.row(c).data(), XData);
    }
    return computeLossImpl(preds, yData);
  }

  // Sum of squared norms of all per-class weight vectors.
  Double computeSquaredNorm() const {
    Double res = 0;
#pragma omp parallel for reduction(+:res)
    for (size_t c = 0; c < nclasses_; ++c) {
      res += solvers_[c].w().squaredNorm();
    }
    return res;
  }

  // Sum of proximal penalties of all per-class solvers.
  Double computeProxPenalty() const {
    Double res = 0;
#pragma omp parallel for reduction(+:res)
    for (size_t c = 0; c < nclasses_; ++c) {
      res += solvers_[c].computeProxPenalty();
    }
    return res;
  }

 private:
  // Mean over samples of the summed per-class binary losses; all classes
  // share solvers_[0]'s loss name.
  Double computeLossImpl(const Matrix& preds, const int32_t* const yData) const {
    const size_t dataSize = preds.cols();
    Double loss = 0;
#pragma omp parallel for reduction(+:loss)
    for (size_t i = 0; i < dataSize; ++i) {
      Double l = 0;
      for (size_t c = 0; c < nclasses_; ++c) {
        l += Loss::computeLoss(
            solvers_[0].loss(),
            preds(c, i),
            static_cast<Double>(yData[i] == static_cast<int32_t>(c)));
      }
      loss += l;
    }
    return loss / dataSize;
  }

  const size_t nclasses_;        // number of classes
  std::vector<SolverT> solvers_; // one binary solver per class
};
}
mrcore.c
/*************************************************************************** * Copyright 2013 CertiVox UK Ltd. * * This file is part of CertiVox MIRACL Crypto SDK. * * The CertiVox MIRACL Crypto SDK provides developers with an * extensive and efficient set of cryptographic functions. * For further information about its features and functionalities please * refer to http://www.certivox.com * * * The CertiVox MIRACL Crypto SDK is free software: you can * redistribute it and/or modify it under the terms of the * GNU Affero General Public License as published by the * Free Software Foundation, either version 3 of the License, * or (at your option) any later version. * * * The CertiVox MIRACL Crypto SDK is distributed in the hope * that it will be useful, but WITHOUT ANY WARRANTY; without even the * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU Affero General Public License for more details. * * * You should have received a copy of the GNU Affero General Public * License along with CertiVox MIRACL Crypto SDK. * If not, see <http://www.gnu.org/licenses/>. * * You can be released from the requirements of the license by purchasing * a commercial license. Buying such a license is mandatory as soon as you * develop commercial activities involving the CertiVox MIRACL Crypto SDK * without disclosing the source code of your own applications, or shipping * the CertiVox MIRACL Crypto SDK with a closed source product. * * ***************************************************************************/ /* * * MIRACL Core module - contains initialisation code and general purpose * utilities * mrcore.c * * Space can be saved by removing unneeded functions (mr_and ?) 
* */ #include "miracl.h" #include <stdlib.h> #include <string.h> #ifdef MR_FP #include <math.h> #endif /*** Multi-Threaded Support ***/ #ifndef MR_GENERIC_MT #ifdef MR_OPENMP_MT #include <omp.h> #define MR_MIP_EXISTS miracl *mr_mip; #pragma omp threadprivate(mr_mip) miracl *get_mip() { return mr_mip; } void mr_init_threading() { } void mr_end_threading() { } #endif #ifdef MR_WINDOWS_MT #include <windows.h> DWORD mr_key; miracl *get_mip() { return (miracl *)TlsGetValue(mr_key); } void mr_init_threading() { mr_key=TlsAlloc(); } void mr_end_threading() { TlsFree(mr_key); } #endif #ifdef MR_UNIX_MT #include <pthread.h> pthread_key_t mr_key; miracl *get_mip() { return (miracl *)pthread_getspecific(mr_key); } void mr_init_threading() { pthread_key_create(&mr_key,(void(*)(void *))NULL); } void mr_end_threading() { pthread_key_delete(mr_key); } #endif #ifndef MR_WINDOWS_MT #ifndef MR_UNIX_MT #ifndef MR_OPENMP_MT #ifdef MR_STATIC miracl mip; miracl *mr_mip=&mip; #else miracl *mr_mip=NULL; /* MIRACL's one and only global variable */ #endif #define MR_MIP_EXISTS miracl *get_mip() { return (miracl *)mr_mip; } #endif #endif #endif #ifdef MR_MIP_EXISTS void set_mip(miracl *mip) { mr_mip=mip; } #endif #endif /* See Advanced Windows by Jeffrey Richter, Chapter 12 for methods for creating different instances of this global for each executing thread when using Windows '95/NT */ #ifdef MR_STATIC #if MIRACL==8 static const int mr_small_primes[]= {2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97,101,103, 107,109,113,127,0}; #else static const int mr_small_primes[]= {2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97,101,103, 107,109,113,127,131,137,139,149,151,157,163,167,173,179,181,191,193,197,199,211, 223,227,229,233,239,241,251,257,263,269,271,277,281,283,293,307,311,313,317,331, 337,347,349,353,359,367,373,379,383,389,397,401,409,419,421,431,433,439,443,449, 457,461,463,467,479,487,491,499,503,509,521,523,541,547,557,563,569,571,577,587, 
593,599,601,607,613,617,619,631,641,643,647,653,659,661,673,677,683,691,701,709, 719,727,733,739,743,751,757,761,769,773,787,797,809,811,821,823,827,829,839,853, 857,859,863,877,881,883,887,907,911,919,929,937,941,947,953,967,971,977,983,991, 997,0}; #endif #endif #ifndef MR_STRIPPED_DOWN #ifndef MR_NO_STANDARD_IO static char *names[] = {(char *)"your program",(char *)"innum",(char *)"otnum",(char *)"jack",(char *)"normalise", (char *)"multiply",(char *)"divide",(char *)"incr",(char *)"decr",(char *)"premult", (char *)"subdiv",(char *)"fdsize",(char *)"egcd",(char *)"cbase", (char *)"cinnum",(char *)"cotnum",(char *)"nroot",(char *)"power", (char *)"powmod",(char *)"bigdig",(char *)"bigrand",(char *)"nxprime",(char *)"isprime", (char *)"mirvar",(char *)"mad",(char *)"multi_inverse",(char *)"putdig", (char *)"add",(char *)"subtract",(char *)"mirsys",(char *)"xgcd", (char *)"fpack",(char *)"dconv",(char *)"mr_shift",(char *)"mround",(char *)"fmul", (char *)"fdiv",(char *)"fadd",(char *)"fsub",(char *)"fcomp",(char *)"fconv", (char *)"frecip",(char *)"fpmul",(char *)"fincr",(char *)"",(char *)"ftrunc", (char *)"frand",(char *)"sftbit",(char *)"build",(char *)"logb2",(char *)"expint", (char *)"fpower",(char *)"froot",(char *)"fpi",(char *)"fexp",(char *)"flog",(char *)"fpowf", (char *)"ftan",(char *)"fatan",(char *)"fsin",(char *)"fasin",(char *)"fcos",(char *)"facos", (char *)"ftanh",(char *)"fatanh",(char *)"fsinh",(char *)"fasinh",(char *)"fcosh", (char *)"facosh",(char *)"flop",(char *)"gprime",(char *)"powltr",(char *)"fft_mult", (char *)"crt_init",(char *)"crt",(char *)"otstr",(char *)"instr",(char *)"cotstr",(char *)"cinstr",(char *)"powmod2", (char *)"prepare_monty",(char *)"nres",(char *)"redc",(char *)"nres_modmult",(char *)"nres_powmod", (char *)"nres_moddiv",(char *)"nres_powltr",(char *)"divisible",(char *)"remain", (char *)"fmodulo",(char *)"nres_modadd",(char *)"nres_modsub",(char *)"nres_negate", (char *)"ecurve_init",(char *)"ecurve_add",(char 
*)"ecurve_mult", (char *)"epoint_init",(char *)"epoint_set",(char *)"epoint_get",(char *)"nres_powmod2", (char *)"nres_sqroot",(char *)"sqroot",(char *)"nres_premult",(char *)"ecurve_mult2", (char *)"ecurve_sub",(char *)"trial_division",(char *)"nxsafeprime",(char *)"nres_lucas",(char *)"lucas", (char *)"brick_init",(char *)"pow_brick",(char *)"set_user_function", (char *)"nres_powmodn",(char *)"powmodn",(char *)"ecurve_multn", (char *)"ebrick_init",(char *)"mul_brick",(char *)"epoint_norm",(char *)"nres_multi_inverse",(char *)"", (char *)"nres_dotprod",(char *)"epoint_negate",(char *)"ecurve_multi_add", (char *)"ecurve2_init",(char *)"",(char *)"epoint2_set",(char *)"epoint2_norm",(char *)"epoint2_get", (char *)"epoint2_comp",(char *)"ecurve2_add",(char *)"epoint2_negate",(char *)"ecurve2_sub", (char *)"ecurve2_multi_add",(char *)"ecurve2_mult",(char *)"ecurve2_multn",(char *)"ecurve2_mult2", (char *)"ebrick2_init",(char *)"mul2_brick",(char *)"prepare_basis",(char *)"strong_bigrand", (char *)"bytes_to_big",(char *)"big_to_bytes",(char *)"set_io_buffer_size", (char *)"epoint_getxyz",(char *)"epoint_double_add",(char *)"nres_double_inverse", (char *)"double_inverse",(char *)"epoint_x",(char *)"hamming",(char *)"expb2",(char *)"bigbits", (char *)"nres_lazy",(char *)"zzn2_imul",(char *)"nres_double_modadd",(char *)"nres_double_modsub", /*155*/(char *)"",(char *)"zzn2_from_int",(char *)"zzn2_negate",(char *)"zzn2_conj",(char *)"zzn2_add", (char *)"zzn2_sub",(char *)"zzn2_smul",(char *)"zzn2_mul",(char *)"zzn2_inv",(char *)"zzn2_timesi",(char *)"zzn2_powl", (char *)"zzn2_from_bigs",(char *)"zzn2_from_big",(char *)"zzn2_from_ints", (char *)"zzn2_sadd",(char *)"zzn2_ssub",(char *)"zzn2_times_irp",(char *)"zzn2_div2", (char *)"zzn3_from_int",(char *)"zzn3_from_ints",(char *)"zzn3_from_bigs", (char *)"zzn3_from_big",(char *)"zzn3_negate",(char *)"zzn3_powq",(char *)"zzn3_init", (char *)"zzn3_add",(char *)"zzn3_sadd",(char *)"zzn3_sub",(char *)"zzn3_ssub",(char 
*)"zzn3_smul", (char *)"zzn3_imul",(char *)"zzn3_mul",(char *)"zzn3_inv",(char *)"zzn3_div2",(char *)"zzn3_timesi", (char *)"epoint_multi_norm",(char *)"mr_jsf",(char *)"epoint2_multi_norm", (char *)"ecn2_compare",(char *)"ecn2_norm",(char *)"ecn2_set",(char *)"zzn2_txx", (char *)"zzn2_txd",(char *)"nres_div2",(char *)"nres_div3",(char *)"zzn2_div3", (char *)"ecn2_setx",(char *)"ecn2_rhs",(char *)"zzn2_qr",(char *)"zzn2_sqrt",(char *)"ecn2_add",(char *)"ecn2_mul2_jsf",(char *)"ecn2_mul", (char *)"nres_div5",(char *)"zzn2_div5",(char *)"zzn2_sqr",(char *)"ecn2_add_sub",(char *)"ecn2_psi",(char *)"invmodp", (char *)"zzn2_multi_inverse",(char *)"ecn2_multi_norm",(char *)"ecn2_precomp",(char *)"ecn2_mul4_gls_v", (char *)"ecn2_mul2",(char *)"ecn2_precomp_gls",(char *)"ecn2_mul2_gls", (char *)"ecn2_brick_init",(char *)"ecn2_mul_brick_gls",(char *)"ecn2_multn",(char *)"zzn3_timesi2", (char *)"nres_complex",(char *)"zzn4_from_int",(char *)"zzn4_negate",(char *)"zzn4_conj",(char *)"zzn4_add",(char *)"zzn4_sadd",(char *)"zzn4_sub",(char *)"zzn4_ssub",(char *)"zzn4_smul",(char *)"zzn4_sqr", (char *)"zzn4_mul",(char *)"zzn4_inv",(char *)"zzn4_div2",(char *)"zzn4_powq",(char *)"zzn4_tx",(char *)"zzn4_imul",(char *)"zzn4_lmul",(char *)"zzn4_from_big", (char *)"ecn2_mult4"}; /* 0 - 243 (244 in all) */ #endif #endif #ifdef MR_NOASM /* C only versions of muldiv/muldvd/muldvd2/muldvm */ /* Note that mr_large should be twice the size of mr_small */ mr_small muldiv(mr_small a,mr_small b,mr_small c,mr_small m,mr_small *rp) { mr_small q; mr_large ldres,p=(mr_large)a*b+c; q=(mr_small)(MR_LROUND(p/m)); *rp=(mr_small)(p-(mr_large)q*m); return q; } #ifdef MR_FP_ROUNDING mr_small imuldiv(mr_small a,mr_small b,mr_small c,mr_small m,mr_large im,mr_small *rp) { mr_small q; mr_large ldres,p=(mr_large)a*b+c; q=(mr_small)MR_LROUND(p*im); *rp=(mr_small)(p-(mr_large)q*m); return q; } #endif #ifndef MR_NOFULLWIDTH mr_small muldvm(mr_small a,mr_small c,mr_small m,mr_small *rp) { mr_small q; union 
doubleword dble; dble.h[MR_BOT]=c; dble.h[MR_TOP]=a; q=(mr_small)(dble.d/m); *rp=(mr_small)(dble.d-(mr_large)q*m); return q; } mr_small muldvd(mr_small a,mr_small b,mr_small c,mr_small *rp) { union doubleword dble; dble.d=(mr_large)a*b+c; *rp=dble.h[MR_BOT]; return dble.h[MR_TOP]; } void muldvd2(mr_small a,mr_small b,mr_small *c,mr_small *rp) { union doubleword dble; dble.d=(mr_large)a*b+*c+*rp; *rp=dble.h[MR_BOT]; *c=dble.h[MR_TOP]; } #endif #endif #ifdef MR_NOFULLWIDTH /* no FULLWIDTH working, so supply dummies */ /* mr_small muldvd(mr_small a,mr_small b,mr_small c,mr_small *rp) { return (mr_small)0; } mr_small muldvm(mr_small a,mr_small c,mr_small m,mr_small *rp) { return (mr_small)0; } void muldvd2(mr_small a,mr_small b,mr_small *c,mr_small *rp) { } */ #endif #ifndef MR_NO_STANDARD_IO static void mputs(char *s) { /* output a string */ int i=0; while (s[i]!=0) fputc((int)s[i++],stdout); } #endif void mr_berror(_MIPD_ int nerr) { /* Big number error routine */ #ifndef MR_STRIPPED_DOWN int i; #endif #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERCON) { mr_mip->ERNUM=nerr; return; } #ifndef MR_NO_STANDARD_IO #ifndef MR_STRIPPED_DOWN mputs((char *)"\nMIRACL error from routine "); if (mr_mip->depth<MR_MAXDEPTH) mputs(names[mr_mip->trace[mr_mip->depth]]); else mputs((char *)"???"); fputc('\n',stdout); for (i=mr_mip->depth-1;i>=0;i--) { mputs((char *)" called from "); if (i<MR_MAXDEPTH) mputs(names[mr_mip->trace[i]]); else mputs((char *)"???"); fputc('\n',stdout); } switch (nerr) { case 1 : mputs((char *)"Number base too big for representation\n"); break; case 2 : mputs((char *)"Division by zero attempted\n"); break; case 3 : mputs((char *)"Overflow - Number too big\n"); break; case 4 : mputs((char *)"Internal result is negative\n"); break; case 5 : mputs((char *)"Input format error\n"); break; case 6 : mputs((char *)"Illegal number base\n"); break; case 7 : mputs((char *)"Illegal parameter usage\n"); break; case 8 : mputs((char *)"Out of 
space\n"); break; case 9 : mputs((char *)"Even root of a negative number\n"); break; case 10: mputs((char *)"Raising integer to negative power\n"); break; case 11: mputs((char *)"Attempt to take illegal root\n"); break; case 12: mputs((char *)"Integer operation attempted on Flash number\n"); break; case 13: mputs((char *)"Flash overflow\n"); break; case 14: mputs((char *)"Numbers too big\n"); break; case 15: mputs((char *)"Log of a non-positive number\n"); break; case 16: mputs((char *)"Flash to double conversion failure\n"); break; case 17: mputs((char *)"I/O buffer overflow\n"); break; case 18: mputs((char *)"MIRACL not initialised - no call to mirsys()\n"); break; case 19: mputs((char *)"Illegal modulus \n"); break; case 20: mputs((char *)"No modulus defined\n"); break; case 21: mputs((char *)"Exponent too big\n"); break; case 22: mputs((char *)"Unsupported Feature - check mirdef.h\n"); break; case 23: mputs((char *)"Specified double length type isn't double length\n"); break; case 24: mputs((char *)"Specified basis is NOT irreducible\n"); break; case 25: mputs((char *)"Unable to control Floating-point rounding\n"); break; case 26: mputs((char *)"Base must be binary (MR_ALWAYS_BINARY defined in mirdef.h ?)\n"); break; case 27: mputs((char *)"No irreducible basis defined\n"); break; case 28: mputs((char *)"Composite modulus\n"); break; case 29: mputs((char *)"Input/output error when reading from RNG device node\n"); break; default: mputs((char *)"Undefined error\n"); break; } exit(0); #else mputs((char *)"MIRACL error\n"); exit(0); #endif #endif } #ifndef MR_STRIPPED_DOWN void mr_track(_MIPDO_ ) { /* track course of program execution * * through the MIRACL routines */ #ifndef MR_NO_STANDARD_IO int i; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif for (i=0;i<mr_mip->depth;i++) fputc('-',stdout); fputc('>',stdout); mputs(names[mr_mip->trace[mr_mip->depth]]); fputc('\n',stdout); #endif } #endif #ifndef MR_NO_RAND mr_small brand(_MIPDO_ ) { /* Marsaglia & 
Zaman random number generator */ int i,k; mr_unsign32 pdiff,t; mr_small r; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->lg2b>32) { /* underlying type is > 32 bits. Assume <= 64 bits */ mr_mip->rndptr+=2; if (mr_mip->rndptr<NK-1) { r=(mr_small)mr_mip->ira[mr_mip->rndptr]; r=mr_shiftbits(r,mr_mip->lg2b-32); r+=(mr_small)mr_mip->ira[mr_mip->rndptr+1]; return r; } } else { mr_mip->rndptr++; if (mr_mip->rndptr<NK) return (mr_small)mr_mip->ira[mr_mip->rndptr]; } mr_mip->rndptr=0; for (i=0,k=NK-NJ;i<NK;i++,k++) { /* calculate next NK values */ if (k==NK) k=0; t=mr_mip->ira[k]; pdiff=t - mr_mip->ira[i] - mr_mip->borrow; if (pdiff<t) mr_mip->borrow=0; if (pdiff>t) mr_mip->borrow=1; mr_mip->ira[i]=pdiff; } if (mr_mip->lg2b>32) { /* double up */ r=(mr_small)mr_mip->ira[0]; r=mr_shiftbits(r,mr_mip->lg2b-32); r+=(mr_small)mr_mip->ira[1]; return r; } else return (mr_small)(mr_mip->ira[0]); } void irand(_MIPD_ mr_unsign32 seed) { /* initialise random number system */ int i,in; mr_unsign32 t,m=1L; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif mr_mip->borrow=0L; mr_mip->rndptr=0; mr_mip->ira[0]=seed; for (i=1;i<NK;i++) { /* fill initialisation vector */ in=(NV*i)%NK; mr_mip->ira[in]=m; t=m; m=seed-m; seed=t; } for (i=0;i<1000;i++) brand(_MIPPO_ ); /* "warm-up" & stir the generator */ } #endif mr_small mr_shiftbits(mr_small x,int n) { #ifdef MR_FP int i; mr_small dres; if (n==0) return x; if (n>0) { for (i=0;i<n;i++) x=x+x; return x; } n=-n; for (i=0;i<n;i++) x=MR_DIV(x,2.0); return x; #else if (n==0) return x; if (n>0) x<<=n; else x>>=(-n); return x; #endif } mr_small mr_setbase(_MIPD_ mr_small nb) { /* set base. 
Pack as many digits as * * possible into each computer word */ mr_small temp; #ifdef MR_FP mr_small dres; #endif #ifndef MR_NOFULLWIDTH BOOL fits; int bits; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif fits=FALSE; bits=MIRACL; while (bits>1) { bits/=2; temp=((mr_small)1<<bits); if (temp==nb) { fits=TRUE; break; } if (temp<nb || (bits%2)!=0) break; } if (fits) { mr_mip->apbase=nb; mr_mip->pack=MIRACL/bits; mr_mip->base=0; return 0; } #endif mr_mip->apbase=nb; mr_mip->pack=1; mr_mip->base=nb; #ifdef MR_SIMPLE_BASE return 0; #else if (mr_mip->base==0) return 0; temp=MR_DIV(MAXBASE,nb); while (temp>=nb) { temp=MR_DIV(temp,nb); mr_mip->base*=nb; mr_mip->pack++; } #ifdef MR_FP_ROUNDING mr_mip->inverse_base=mr_invert(mr_mip->base); return mr_mip->inverse_base; #else return 0; #endif #endif } #ifdef MR_FLASH BOOL fit(big x,big y,int f) { /* returns TRUE if x/y would fit flash format of length f */ int n,d; n=(int)(x->len&(MR_OBITS)); d=(int)(y->len&(MR_OBITS)); if (n==1 && x->w[0]==1) n=0; if (d==1 && y->w[0]==1) d=0; if (n+d<=f) return TRUE; return FALSE; } #endif int mr_lent(flash x) { /* return length of big or flash in words */ mr_lentype lx; lx=(x->len&(MR_OBITS)); #ifdef MR_FLASH return (int)((lx&(MR_MSK))+((lx>>(MR_BTS))&(MR_MSK))); #else return (int)lx; #endif } void zero(flash x) { /* set big/flash number to zero */ int i,n; mr_small *g; if (x==NULL) return; #ifdef MR_FLASH n=mr_lent(x); #else n=(x->len&MR_OBITS); #endif g=x->w; for (i=0;i<n;i++) g[i]=0; x->len=0; } void uconvert(_MIPD_ unsigned int n ,big x) { /* convert unsigned integer n to big number format */ int m; #ifdef MR_FP mr_small dres; #endif #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif zero(x); if (n==0) return; m=0; #ifndef MR_SIMPLE_BASE if (mr_mip->base==0) { #endif #ifndef MR_NOFULLWIDTH #if MR_IBITS > MIRACL while (n>0) { x->w[m++]=(mr_small)(n%((mr_small)1<<(MIRACL))); n/=((mr_small)1<<(MIRACL)); } #else x->w[m++]=(mr_small)n; #endif #endif #ifndef MR_SIMPLE_BASE } else while 
(n>0) { x->w[m++]=MR_REMAIN((mr_small)n,mr_mip->base); n=(unsigned int)((mr_small)n/mr_mip->base); } #endif x->len=m; } void tconvert(_MIPD_ mr_utype n,big x) { mr_lentype s; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (n==0) {zero(x); return;} s=0; if (n<0) { s=MR_MSBIT; n=(-n); } x->w[0]=n; x->len=1; x->len|=s; } void convert(_MIPD_ int n ,big x) { /* convert signed integer n to big number format */ mr_lentype s; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (n==0) {zero(x); return;} s=0; if (n<0) { s=MR_MSBIT; n=(-n); } uconvert(_MIPP_ (unsigned int)n,x); x->len|=s; } #ifndef MR_STATIC #ifdef mr_dltype void dlconv(_MIPD_ mr_dltype n,big x) { /* convert double length integer to big number format - rarely needed */ int m; mr_lentype s; #ifdef MR_FP mr_small dres; #endif #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif zero(x); if (n==0) return; s=0; if (n<0) { s=MR_MSBIT; n=(-n); } m=0; #ifndef MR_SIMPLE_BASE if (mr_mip->base==0) { #endif #ifndef MR_NOFULLWIDTH while (n>0) { x->w[m++]=(mr_small)(n%((mr_dltype)1<<(MIRACL))); n/=((mr_dltype)1<<(MIRACL)); } #endif #ifndef MR_SIMPLE_BASE } else while (n>0) { x->w[m++]=(mr_small)MR_REMAIN(n,mr_mip->base); n/=mr_mip->base; } #endif x->len=(m|s); } #endif void ulgconv(_MIPD_ unsigned long n,big x) { /* convert unsigned long integer to big number format - rarely needed */ int m; #ifdef MR_FP mr_small dres; #endif #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif zero(x); if (n==0) return; m=0; #ifndef MR_SIMPLE_BASE if (mr_mip->base==0) { #endif #ifndef MR_NOFULLWIDTH #if MR_LBITS > MIRACL while (n>0) { x->w[m++]=(mr_small)(n%(1L<<(MIRACL))); n/=(1L<<(MIRACL)); } #else x->w[m++]=(mr_small)n; #endif #endif #ifndef MR_SIMPLE_BASE } else while (n>0) { x->w[m++]=MR_REMAIN(n,mr_mip->base); n=(unsigned long)((mr_small)n/mr_mip->base); } #endif x->len=m; } void lgconv(_MIPD_ long n,big x) { /* convert signed long integer to big number format - rarely needed */ mr_lentype s; #ifdef MR_OS_THREADS 
miracl *mr_mip=get_mip(); #endif if (n==0) {zero(x); return;} s=0; if (n<0) { s=MR_MSBIT; n=(-n); } ulgconv(_MIPP_ (unsigned long)n,x); x->len|=s; } flash mirvar(_MIPD_ int iv) { /* initialize big/flash number */ flash x; int align; char *ptr; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return NULL; MR_IN(23); if (!(mr_mip->active)) { mr_berror(_MIPP_ MR_ERR_NO_MIRSYS); MR_OUT return NULL; } /* OK, now I control alignment.... */ /* Allocate space for big, the length, the pointer, and the array */ /* Do it all in one memory allocation - this is quicker */ /* Ensure that the array has correct alignment */ x=(big)mr_alloc(_MIPP_ mr_size(mr_mip->nib-1),1); if (x==NULL) { MR_OUT return x; } ptr=(char *)&x->w; align=(unsigned long)(ptr+sizeof(mr_small *))%sizeof(mr_small); x->w=(mr_small *)(ptr+sizeof(mr_small *)+sizeof(mr_small)-align); if (iv!=0) convert(_MIPP_ iv,x); MR_OUT return x; } #endif flash mirvar_mem_variable(char *mem,int index,int sz) { flash x; int align; char *ptr; int offset,r; /* alignment */ offset=0; r=(unsigned long)mem%MR_SL; if (r>0) offset=MR_SL-r; x=(big)&mem[offset+mr_size(sz)*index]; ptr=(char *)&x->w; align=(unsigned long)(ptr+sizeof(mr_small *))%sizeof(mr_small); x->w=(mr_small *)(ptr+sizeof(mr_small *)+sizeof(mr_small)-align); return x; } flash mirvar_mem(_MIPD_ char *mem,int index) { /* initialize big/flash number from pre-allocated memory */ #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return NULL; return mirvar_mem_variable(mem,index,mr_mip->nib-1); } void set_user_function(_MIPD_ BOOL (*user)(void)) { #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return; MR_IN(111) if (!(mr_mip->active)) { mr_berror(_MIPP_ MR_ERR_NO_MIRSYS); MR_OUT return; } mr_mip->user=user; MR_OUT } #ifndef MR_STATIC #ifndef MR_SIMPLE_IO void set_io_buffer_size(_MIPD_ int len) { int i; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (len<0) return; MR_IN(142) for 
(i=0;i<mr_mip->IOBSIZ;i++) mr_mip->IOBUFF[i]=0; mr_free(mr_mip->IOBUFF); if (len==0) { MR_OUT return; } mr_mip->IOBSIZ=len; mr_mip->IOBUFF=(char *)mr_alloc(_MIPP_ len+1,1); mr_mip->IOBUFF[0]='\0'; MR_OUT } #endif #endif /* Initialise a big from ROM given its fixed length */ BOOL init_big_from_rom(big x,int len,const mr_small *rom,int romsize,int *romptr) { int i; zero(x); x->len=len; for (i=0;i<len;i++) { if (*romptr>=romsize) return FALSE; #ifdef MR_AVR x->w[i]=pgm_read_byte_near(&rom[*romptr]); #else x->w[i]=rom[*romptr]; #endif (*romptr)++; } mr_lzero(x); return TRUE; } /* Initialise an elliptic curve point from ROM */ BOOL init_point_from_rom(epoint *P,int len,const mr_small *rom,int romsize,int *romptr) { if (!init_big_from_rom(P->X,len,rom,romsize,romptr)) return FALSE; if (!init_big_from_rom(P->Y,len,rom,romsize,romptr)) return FALSE; P->marker=MR_EPOINT_NORMALIZED; return TRUE; } #ifdef MR_GENERIC_AND_STATIC miracl *mirsys(miracl *mr_mip,int nd,mr_small nb) #else miracl *mirsys(int nd,mr_small nb) #endif { /* Initialize MIRACL system to * * use numbers to base nb, and * * nd digits or (-nd) bytes long */ /* In these cases mr_mip is passed as the first parameter */ #ifdef MR_GENERIC_AND_STATIC return mirsys_basic(mr_mip,nd,nb); #endif #ifdef MR_GENERIC_MT #ifndef MR_STATIC miracl *mr_mip=mr_first_alloc(); return mirsys_basic(mr_mip,nd,nb); #endif #endif /* In these cases mr_mip is a "global" pointer and the mip itself is allocated from the heap. 
In fact mr_mip (and mip) may be thread specific if some multi-threading scheme is implemented */ #ifndef MR_STATIC #ifdef MR_WINDOWS_MT miracl *mr_mip=mr_first_alloc(); TlsSetValue(mr_key,mr_mip); #endif #ifdef MR_UNIX_MT miracl *mr_mip=mr_first_alloc(); pthread_setspecific(mr_key,mr_mip); #endif #ifdef MR_OPENMP_MT mr_mip=mr_first_alloc(); #endif #ifndef MR_WINDOWS_MT #ifndef MR_UNIX_MT #ifndef MR_OPENMP_MT mr_mip=mr_first_alloc(); #endif #endif #endif #endif #ifndef MR_GENERIC_MT mr_mip=get_mip(); #endif return mirsys_basic(mr_mip,nd,nb); } miracl *mirsys_basic(miracl *mr_mip,int nd,mr_small nb) { #ifndef MR_NO_RAND int i; #endif mr_small b,nw; #ifdef MR_FP mr_small dres; #endif if (mr_mip==NULL) return NULL; #ifndef MR_STRIPPED_DOWN mr_mip->depth=0; mr_mip->trace[0]=0; mr_mip->depth++; mr_mip->trace[mr_mip->depth]=29; #endif /* digest hardware configuration */ #ifdef MR_NO_STANDARD_IO mr_mip->ERCON=TRUE; #else mr_mip->ERCON=FALSE; #endif #ifndef MR_STATIC mr_mip->logN=0; mr_mip->degree=0; mr_mip->chin.NP=0; #endif mr_mip->user=NULL; mr_mip->same=FALSE; mr_mip->first_one=FALSE; mr_mip->debug=FALSE; mr_mip->AA=0; #ifndef MR_AFFINE_ONLY mr_mip->coord=MR_NOTSET; #endif #ifdef MR_NOFULLWIDTH if (nb==0) { mr_berror(_MIPP_ MR_ERR_BAD_BASE); MR_OUT return mr_mip; } #endif #ifndef MR_FP #ifdef mr_dltype #ifndef MR_NOFULLWIDTH if (sizeof(mr_dltype)<2*sizeof(mr_utype)) { /* double length type, isn't */ mr_berror(_MIPP_ MR_ERR_NOT_DOUBLE_LEN); MR_OUT return mr_mip; } #endif #endif #endif if (nb==1 || nb>MAXBASE) { mr_berror(_MIPP_ MR_ERR_BAD_BASE); MR_OUT return mr_mip; } #ifdef MR_FP_ROUNDING if (mr_setbase(_MIPP_ nb)==0) { /* unable in fact to control FP rounding */ mr_berror(_MIPP_ MR_ERR_NO_ROUNDING); MR_OUT return mr_mip; } #else mr_setbase(_MIPP_ nb); #endif b=mr_mip->base; #ifdef MR_SIMPLE_BASE if (b!=0) { mr_berror(_MIPP_ MR_ERR_BAD_BASE); MR_OUT return mr_mip; } #endif mr_mip->lg2b=0; mr_mip->base2=1; #ifndef MR_SIMPLE_BASE if (b==0) { #endif mr_mip->lg2b=MIRACL; 
mr_mip->base2=0; #ifndef MR_SIMPLE_BASE } else while (b>1) { b=MR_DIV(b,2); mr_mip->lg2b++; mr_mip->base2*=2; } #endif #ifdef MR_ALWAYS_BINARY if (mr_mip->base!=mr_mip->base2) { mr_berror(_MIPP_ MR_ERR_NOT_BINARY); MR_OUT return mr_mip; } #endif /* calculate total space for bigs */ /* big -> |int len|small *ptr| alignment space | size in words +1| alignment up to multiple of 4 | */ if (nd>0) nw=MR_ROUNDUP(nd,mr_mip->pack); else nw=MR_ROUNDUP(8*(-nd),mr_mip->lg2b); if (nw<1) nw=1; mr_mip->nib=(int)(nw+1); /* add one extra word for small overflows */ #ifdef MR_STATIC if (nw>MR_STATIC) { mr_berror(_MIPP_ MR_ERR_TOO_BIG); MR_OUT return mr_mip; } #endif /* mr_mip->nib=(int)(nw+1); add one extra word for small overflows */ #ifdef MR_FLASH mr_mip->workprec=mr_mip->nib; mr_mip->stprec=mr_mip->nib; while (mr_mip->stprec>2 && mr_mip->stprec>MR_FLASH/mr_mip->lg2b) mr_mip->stprec=(mr_mip->stprec+1)/2; if (mr_mip->stprec<2) mr_mip->stprec=2; #endif #ifndef MR_DOUBLE_BIG mr_mip->check=ON; #else mr_mip->check=OFF; #endif #ifndef MR_SIMPLE_BASE #ifndef MR_SIMPLE_IO mr_mip->IOBASE=10; /* defaults */ #endif #endif mr_mip->ERNUM=0; mr_mip->NTRY=6; mr_mip->MONTY=ON; #ifdef MR_FLASH mr_mip->EXACT=TRUE; mr_mip->RPOINT=OFF; #endif #ifndef MR_STRIPPED_DOWN mr_mip->TRACER=OFF; #endif #ifndef MR_SIMPLE_IO mr_mip->INPLEN=0; mr_mip->IOBSIZ=MR_DEFAULT_BUFFER_SIZE; #endif #ifdef MR_STATIC mr_mip->PRIMES=mr_small_primes; #else mr_mip->PRIMES=NULL; #ifndef MR_SIMPLE_IO mr_mip->IOBUFF=(char *)mr_alloc(_MIPP_ MR_DEFAULT_BUFFER_SIZE+1,1); #endif #endif #ifndef MR_SIMPLE_IO mr_mip->IOBUFF[0]='\0'; #endif mr_mip->qnr=0; mr_mip->cnr=0; mr_mip->TWIST=0; mr_mip->pmod8=0; mr_mip->pmod9=0; /* quick start for rng. irand(.) should be called first before serious use.. 
*/ #ifndef MR_NO_RAND mr_mip->ira[0]=0x55555555; mr_mip->ira[1]=0x12345678; for (i=2;i<NK;i++) mr_mip->ira[i]=mr_mip->ira[i-1]+mr_mip->ira[i-2]+0x1379BDF1; mr_mip->rndptr=NK; mr_mip->borrow=0; #endif mr_mip->nib=2*mr_mip->nib+1; #ifdef MR_FLASH if (mr_mip->nib!=(mr_mip->nib&(MR_MSK))) #else if (mr_mip->nib!=(int)(mr_mip->nib&(MR_OBITS))) #endif { mr_berror(_MIPP_ MR_ERR_TOO_BIG); mr_mip->nib=(mr_mip->nib-1)/2; MR_OUT return mr_mip; } #ifndef MR_STATIC mr_mip->workspace=(char *)memalloc(_MIPP_ MR_SPACES); /* grab workspace */ #else memset(mr_mip->workspace,0,MR_BIG_RESERVE(MR_SPACES)); #endif mr_mip->M=0; mr_mip->fin=FALSE; mr_mip->fout=FALSE; mr_mip->active=ON; mr_mip->nib=(mr_mip->nib-1)/2; /* allocate memory for workspace variables */ #ifndef MR_DOUBLE_BIG mr_mip->w0=mirvar_mem(_MIPP_ mr_mip->workspace,0); /* double length */ mr_mip->w1=mirvar_mem(_MIPP_ mr_mip->workspace,2); mr_mip->w2=mirvar_mem(_MIPP_ mr_mip->workspace,3); mr_mip->w3=mirvar_mem(_MIPP_ mr_mip->workspace,4); mr_mip->w4=mirvar_mem(_MIPP_ mr_mip->workspace,5); mr_mip->w5=mirvar_mem(_MIPP_ mr_mip->workspace,6); /* double length */ mr_mip->w6=mirvar_mem(_MIPP_ mr_mip->workspace,8); /* double length */ mr_mip->w7=mirvar_mem(_MIPP_ mr_mip->workspace,10); /* double length */ mr_mip->w8=mirvar_mem(_MIPP_ mr_mip->workspace,12); mr_mip->w9=mirvar_mem(_MIPP_ mr_mip->workspace,13); mr_mip->w10=mirvar_mem(_MIPP_ mr_mip->workspace,14); mr_mip->w11=mirvar_mem(_MIPP_ mr_mip->workspace,15); mr_mip->w12=mirvar_mem(_MIPP_ mr_mip->workspace,16); mr_mip->w13=mirvar_mem(_MIPP_ mr_mip->workspace,17); mr_mip->w14=mirvar_mem(_MIPP_ mr_mip->workspace,18); mr_mip->w15=mirvar_mem(_MIPP_ mr_mip->workspace,19); mr_mip->sru=mirvar_mem(_MIPP_ mr_mip->workspace,20); mr_mip->modulus=mirvar_mem(_MIPP_ mr_mip->workspace,21); mr_mip->pR=mirvar_mem(_MIPP_ mr_mip->workspace,22); /* double length */ mr_mip->A=mirvar_mem(_MIPP_ mr_mip->workspace,24); mr_mip->B=mirvar_mem(_MIPP_ mr_mip->workspace,25); mr_mip->one=mirvar_mem(_MIPP_ 
mr_mip->workspace,26); #ifdef MR_KCM mr_mip->big_ndash=mirvar_mem(_MIPP_ mr_mip->workspace,27); mr_mip->ws=mirvar_mem(_MIPP_ mr_mip->workspace,28); mr_mip->wt=mirvar_mem(_MIPP_ mr_mip->workspace,29); /* double length */ #endif #ifdef MR_FLASH #ifdef MR_KCM mr_mip->pi=mirvar_mem(_MIPP_ mr_mip->workspace,31); #else mr_mip->pi=mirvar_mem(_MIPP_ mr_mip->workspace,27); #endif #endif #else /* w0-w7 are double normal length */ mr_mip->w0=mirvar_mem(_MIPP_ mr_mip->workspace,0); /* quad length */ mr_mip->w1=mirvar_mem(_MIPP_ mr_mip->workspace,4); /* double length */ mr_mip->w2=mirvar_mem(_MIPP_ mr_mip->workspace,6); mr_mip->w3=mirvar_mem(_MIPP_ mr_mip->workspace,8); mr_mip->w4=mirvar_mem(_MIPP_ mr_mip->workspace,10); mr_mip->w5=mirvar_mem(_MIPP_ mr_mip->workspace,12); /* quad length */ mr_mip->w6=mirvar_mem(_MIPP_ mr_mip->workspace,16); /* quad length */ mr_mip->w7=mirvar_mem(_MIPP_ mr_mip->workspace,20); /* quad length */ mr_mip->w8=mirvar_mem(_MIPP_ mr_mip->workspace,24); mr_mip->w9=mirvar_mem(_MIPP_ mr_mip->workspace,25); mr_mip->w10=mirvar_mem(_MIPP_ mr_mip->workspace,26); mr_mip->w11=mirvar_mem(_MIPP_ mr_mip->workspace,27); mr_mip->w12=mirvar_mem(_MIPP_ mr_mip->workspace,28); mr_mip->w13=mirvar_mem(_MIPP_ mr_mip->workspace,29); mr_mip->w14=mirvar_mem(_MIPP_ mr_mip->workspace,30); mr_mip->w15=mirvar_mem(_MIPP_ mr_mip->workspace,31); mr_mip->sru=mirvar_mem(_MIPP_ mr_mip->workspace,32); mr_mip->modulus=mirvar_mem(_MIPP_ mr_mip->workspace,33); mr_mip->pR=mirvar_mem(_MIPP_ mr_mip->workspace,34); /* double length */ mr_mip->A=mirvar_mem(_MIPP_ mr_mip->workspace,36); mr_mip->B=mirvar_mem(_MIPP_ mr_mip->workspace,37); mr_mip->one=mirvar_mem(_MIPP_ mr_mip->workspace,38); #ifdef MR_KCM mr_mip->big_ndash=mirvar_mem(_MIPP_ mr_mip->workspace,39); mr_mip->ws=mirvar_mem(_MIPP_ mr_mip->workspace,40); mr_mip->wt=mirvar_mem(_MIPP_ mr_mip->workspace,41); /* double length */ #endif #ifdef MR_FLASH #ifdef MR_KCM mr_mip->pi=mirvar_mem(_MIPP_ mr_mip->workspace,43); #else 
mr_mip->pi=mirvar_mem(_MIPP_ mr_mip->workspace,39); #endif #endif #endif MR_OUT return mr_mip; } #ifndef MR_STATIC /* allocate space for a number of bigs from the heap */ void *memalloc(_MIPD_ int num) { #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif return mr_alloc(_MIPP_ mr_big_reserve(num,mr_mip->nib-1),1); } #endif void memkill(_MIPD_ char *mem,int len) { #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mem==NULL) return; memset(mem,0,mr_big_reserve(len,mr_mip->nib-1)); #ifndef MR_STATIC mr_free(mem); #endif } #ifndef MR_STATIC void mirkill(big x) { /* kill a big/flash variable, that is set it to zero and free its memory */ if (x==NULL) return; zero(x); mr_free(x); } #endif void mirexit(_MIPDO_ ) { /* clean up after miracl */ int i; #ifdef MR_WINDOWS_MT miracl *mr_mip=get_mip(); #endif #ifdef MR_UNIX_MT miracl *mr_mip=get_mip(); #endif #ifdef MR_OPENMP_MT miracl *mr_mip=get_mip(); #endif mr_mip->ERCON=FALSE; mr_mip->active=OFF; memkill(_MIPP_ mr_mip->workspace,MR_SPACES); #ifndef MR_NO_RAND for (i=0;i<NK;i++) mr_mip->ira[i]=0L; #endif #ifndef MR_STATIC #ifndef MR_SIMPLE_IO set_io_buffer_size(_MIPP_ 0); #endif if (mr_mip->PRIMES!=NULL) mr_free(mr_mip->PRIMES); #else #ifndef MR_SIMPLE_IO for (i=0;i<=MR_DEFAULT_BUFFER_SIZE;i++) mr_mip->IOBUFF[i]=0; #endif #endif #ifndef MR_STATIC mr_free(mr_mip); #ifdef MR_WINDOWS_MT TlsSetValue(mr_key, NULL); /* Thank you Thales */ #endif #endif #ifndef MR_GENERIC_MT #ifndef MR_WINDOWS_MT #ifndef MR_UNIX_MT #ifndef MR_STATIC mr_mip=NULL; #endif #endif #endif #endif #ifdef MR_OPENMP_MT mr_mip=NULL; #endif } int exsign(flash x) { /* extract sign of big/flash number */ if ((x->len&(MR_MSBIT))==0) return PLUS; else return MINUS; } void insign(int s,flash x) { /* assert sign of big/flash number */ if (x->len==0) return; if (s<0) x->len|=MR_MSBIT; else x->len&=MR_OBITS; } void mr_lzero(big x) { /* strip leading zeros from big number */ mr_lentype s; int m; s=(x->len&(MR_MSBIT)); m=(int)(x->len&(MR_OBITS)); while (m>0 && 
x->w[m-1]==0) m--; x->len=m; if (m>0) x->len|=s; } #ifndef MR_SIMPLE_IO int getdig(_MIPD_ big x,int i) { /* extract a packed digit */ int k; mr_small n; #ifdef MR_FP mr_small dres; #endif #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif i--; n=x->w[i/mr_mip->pack]; if (mr_mip->pack==1) return (int)n; k=i%mr_mip->pack; for (i=1;i<=k;i++) n=MR_DIV(n,mr_mip->apbase); return (int)MR_REMAIN(n,mr_mip->apbase); } int numdig(_MIPD_ big x) { /* returns number of digits in x */ int nd; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (x->len==0) return 0; nd=(int)(x->len&(MR_OBITS))*mr_mip->pack; while (getdig(_MIPP_ x,nd)==0) nd--; return nd; } void putdig(_MIPD_ int n,big x,int i) { /* insert a digit into a packed word */ int j,k,lx; mr_small m,p; mr_lentype s; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return; MR_IN(26) s=(x->len&(MR_MSBIT)); lx=(int)(x->len&(MR_OBITS)); m=getdig(_MIPP_ x,i); p=n; i--; j=i/mr_mip->pack; k=i%mr_mip->pack; for (i=1;i<=k;i++) { m*=mr_mip->apbase; p*=mr_mip->apbase; } if (j>=mr_mip->nib && (mr_mip->check || j>=2*mr_mip->nib)) { mr_berror(_MIPP_ MR_ERR_OVERFLOW); MR_OUT return; } x->w[j]=(x->w[j]-m)+p; if (j>=lx) x->len=((j+1)|s); mr_lzero(x); MR_OUT } #endif #ifndef MR_FP void mr_and(big x,big y,big z) { /* z= bitwise logical AND of x and y */ int i,nx,ny,nz,nr; if (x==y) { copy(x,z); return; } #ifdef MR_FLASH nx=mr_lent(x); ny=mr_lent(y); nz=mr_lent(z); #else ny=(y->len&(MR_OBITS)); nx=(x->len&(MR_OBITS)); nz=(z->len&(MR_OBITS)); #endif if (ny<nx) nr=ny; else nr=nx; for (i=0;i<nr;i++) z->w[i]=x->w[i]&y->w[i]; for (i=nr;i<nz;i++) z->w[i]=0; z->len=nr; } void mr_xor(big x,big y,big z) { int i,nx,ny,nz,nr; if (x==y) { copy(x,z); return; } #ifdef MR_FLASH nx=mr_lent(x); ny=mr_lent(y); nz=mr_lent(z); #else ny=(y->len&(MR_OBITS)); nx=(x->len&(MR_OBITS)); nz=(z->len&(MR_OBITS)); #endif if (ny<nx) nr=nx; else nr=ny; for (i=0;i<nr;i++) z->w[i]=x->w[i]^y->w[i]; for (i=nr;i<nz;i++) z->w[i]=0; z->len=nr; } 
#endif void copy(flash x,flash y) { /* copy x to y: y=x */ int i,nx,ny; mr_small *gx,*gy; if (x==y || y==NULL) return; if (x==NULL) { zero(y); return; } #ifdef MR_FLASH ny=mr_lent(y); nx=mr_lent(x); #else ny=(y->len&(MR_OBITS)); nx=(x->len&(MR_OBITS)); #endif gx=x->w; gy=y->w; for (i=nx;i<ny;i++) gy[i]=0; for (i=0;i<nx;i++) gy[i]=gx[i]; y->len=x->len; } void negify(flash x,flash y) { /* negate a big/flash variable: y=-x */ copy(x,y); if (y->len!=0) y->len^=MR_MSBIT; } void absol(flash x,flash y) { /* y=abs(x) */ copy(x,y); y->len&=MR_OBITS; } BOOL mr_notint(flash x) { /* returns TRUE if x is Flash */ #ifdef MR_FLASH if ((((x->len&(MR_OBITS))>>(MR_BTS))&(MR_MSK))!=0) return TRUE; #endif return FALSE; } void mr_shift(_MIPD_ big x,int n,big w) { /* set w=x.(mr_base^n) by shifting */ mr_lentype s; int i,bl; mr_small *gw=w->w; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return; copy(x,w); if (w->len==0 || n==0) return; MR_IN(33) if (mr_notint(w)) mr_berror(_MIPP_ MR_ERR_INT_OP); s=(w->len&(MR_MSBIT)); bl=(int)(w->len&(MR_OBITS))+n; if (bl<=0) { zero(w); MR_OUT return; } if (bl>mr_mip->nib && mr_mip->check) mr_berror(_MIPP_ MR_ERR_OVERFLOW); if (mr_mip->ERNUM) { MR_OUT return; } if (n>0) { for (i=bl-1;i>=n;i--) gw[i]=gw[i-n]; for (i=0;i<n;i++) gw[i]=0; } else { n=(-n); for (i=0;i<bl;i++) gw[i]=gw[i+n]; for (i=0;i<n;i++) gw[bl+i]=0; } w->len=(bl|s); MR_OUT } int size(big x) { /* get size of big number; convert to * * integer - if possible */ int n,m; mr_lentype s; if (x==NULL) return 0; s=(x->len&MR_MSBIT); m=(int)(x->len&MR_OBITS); if (m==0) return 0; if (m==1 && x->w[0]<(mr_small)MR_TOOBIG) n=(int)x->w[0]; else n=MR_TOOBIG; if (s==MR_MSBIT) return (-n); return n; } int mr_compare(big x,big y) { /* compare x and y: =1 if x>y =-1 if x<y * * =0 if x=y */ int m,n,sig; mr_lentype sx,sy; if (x==y) return 0; sx=(x->len&MR_MSBIT); sy=(y->len&MR_MSBIT); if (sx==0) sig=PLUS; else sig=MINUS; if (sx!=sy) return sig; m=(int)(x->len&MR_OBITS); 
n=(int)(y->len&MR_OBITS); if (m>n) return sig; if (m<n) return -sig; while (m>0) { /* check digit by digit */ m--; if (x->w[m]>y->w[m]) return sig; if (x->w[m]<y->w[m]) return -sig; } return 0; } #ifdef MR_FLASH void fpack(_MIPD_ big n,big d,flash x) { /* create floating-slash number x=n/d from * * big integer numerator and denominator */ mr_lentype s; int i,ld,ln; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return; MR_IN(31) ld=(int)(d->len&MR_OBITS); if (ld==0) mr_berror(_MIPP_ MR_ERR_FLASH_OVERFLOW); if (ld==1 && d->w[0]==1) ld=0; if (x==d) mr_berror(_MIPP_ MR_ERR_BAD_PARAMETERS); if (mr_notint(n) || mr_notint(d)) mr_berror(_MIPP_ MR_ERR_INT_OP); s=(n->len&MR_MSBIT); ln=(int)(n->len&MR_OBITS); if (ln==1 && n->w[0]==1) ln=0; if ((ld+ln>mr_mip->nib) && (mr_mip->check || ld+ln>2*mr_mip->nib)) mr_berror(_MIPP_ MR_ERR_FLASH_OVERFLOW); if (mr_mip->ERNUM) { MR_OUT return; } copy(n,x); if (n->len==0) { MR_OUT return; } s^=(d->len&MR_MSBIT); if (ld==0) { if (x->len!=0) x->len|=s; MR_OUT return; } for (i=0;i<ld;i++) x->w[ln+i]=d->w[i]; x->len=(s|(ln+((mr_lentype)ld<<MR_BTS))); MR_OUT } void numer(_MIPD_ flash x,big y) { /* extract numerator of x */ int i,ln,ld; mr_lentype s,ly; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return; if (mr_notint(x)) { s=(x->len&MR_MSBIT); ly=(x->len&MR_OBITS); ln=(int)(ly&MR_MSK); if (ln==0) { if(s==MR_MSBIT) convert(_MIPP_ (-1),y); else convert(_MIPP_ 1,y); return; } ld=(int)((ly>>MR_BTS)&MR_MSK); if (x!=y) { for (i=0;i<ln;i++) y->w[i]=x->w[i]; for (i=ln;i<mr_lent(y);i++) y->w[i]=0; } else for (i=0;i<ld;i++) y->w[ln+i]=0; y->len=(ln|s); } else copy(x,y); } void denom(_MIPD_ flash x,big y) { /* extract denominator of x */ int i,ln,ld; mr_lentype ly; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return; if (!mr_notint(x)) { convert(_MIPP_ 1,y); return; } ly=(x->len&MR_OBITS); ln=(int)(ly&MR_MSK); ld=(int)((ly>>MR_BTS)&MR_MSK); for (i=0;i<ld;i++) 
y->w[i]=x->w[ln+i]; if (x==y) for (i=0;i<ln;i++) y->w[ld+i]=0; else for (i=ld;i<mr_lent(y);i++) y->w[i]=0; y->len=ld; } #endif unsigned int igcd(unsigned int x,unsigned int y) { /* integer GCD, returns GCD of x and y */ unsigned int r; if (y==0) return x; while ((r=x%y)!=0) x=y,y=r; return y; } unsigned long lgcd(unsigned long x,unsigned long y) { /* long GCD, returns GCD of x and y */ unsigned long r; if (y==0) return x; while ((r=x%y)!=0) x=y,y=r; return y; } unsigned int isqrt(unsigned int num,unsigned int guess) { /* square root of an integer */ unsigned int sqr; unsigned int oldguess=guess; if (num==0) return 0; if (num<4) return 1; for (;;) { /* Newtons iteration */ /* sqr=guess+(((num/guess)-guess)/2); */ sqr=((num/guess)+guess)/2; if (sqr==guess || sqr==oldguess) { if (sqr*sqr>num) sqr--; return sqr; } oldguess=guess; guess=sqr; } } unsigned long mr_lsqrt(unsigned long num,unsigned long guess) { /* square root of a long */ unsigned long sqr; unsigned long oldguess=guess; if (num==0) return 0; if (num<4) return 1; for (;;) { /* Newtons iteration */ /* sqr=guess+(((num/guess)-guess)/2); */ sqr=((num/guess)+guess)/2; if (sqr==guess || sqr==oldguess) { if (sqr*sqr>num) sqr--; return sqr; } oldguess=guess; guess=sqr; } } mr_small sgcd(mr_small x,mr_small y) { /* integer GCD, returns GCD of x and y */ mr_small r; #ifdef MR_FP mr_small dres; #endif if (y==(mr_small)0) return x; while ((r=MR_REMAIN(x,y))!=(mr_small)0) x=y,y=r; return y; } /* routines to support sliding-windows exponentiation * * in various contexts */ int mr_testbit(_MIPD_ big x,int n) { /* return value of n-th bit of big */ #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif #ifdef MR_FP mr_small m,a,dres; m=mr_shiftbits((mr_small)1,n%mr_mip->lg2b); a=x->w[n/mr_mip->lg2b]; a=MR_DIV(a,m); if ((MR_DIV(a,2.0)*2.0) != a) return 1; #else if ((x->w[n/mr_mip->lg2b] & ((mr_small)1<<(n%mr_mip->lg2b))) >0) return 1; #endif return 0; } void mr_addbit(_MIPD_ big x,int n) { /* add 2^n to positive x - where 
you know that bit is zero. Use with care! */ #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif mr_lentype m=n/mr_mip->lg2b; x->w[m]+=mr_shiftbits((mr_small)1,n%mr_mip->lg2b); if (x->len<m+1) x->len=m+1; } int recode(_MIPD_ big e,int t,int w,int i) { /* recode exponent for Comb method */ #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif int j,r; r=0; for (j=w-1;j>=0;j--) { r<<=1; r|=mr_testbit(_MIPP_ e,i+j*t); } return r; } int mr_window(_MIPD_ big x,int i,int *nbs,int * nzs,int window_size) { /* returns sliding window value, max. of 5 bits, * * (Note from version 5.23 this can be changed by * * setting parameter window_size. This can be * * a useful space-saver) starting at i-th bit of big x. * * nbs is number of bits processed, nzs is the number of * * additional trailing zeros detected. Returns valid bit * * pattern 1x..x1 with no two adjacent 0's. So 10101 * * will return 21 with nbs=5, nzs=0. 11001 will return 3,* * with nbs=2, nzs=2, having stopped after the first 11..*/ #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif int j,r,w; w=window_size; /* check for leading 0 bit */ *nbs=1; *nzs=0; if (!mr_testbit(_MIPP_ x,i)) return 0; /* adjust window size if not enough bits left */ if (i-w+1<0) w=i+1; r=1; for (j=i-1;j>i-w;j--) { /* accumulate bits. 
Abort if two 0's in a row */ (*nbs)++; r*=2; if (mr_testbit(_MIPP_ x,j)) r+=1; if (r%4==0) { /* oops - too many zeros - shorten window */ r/=4; *nbs-=2; *nzs=2; break; } } if (r%2==0) { /* remove trailing 0 */ r/=2; *nzs=1; (*nbs)--; } return r; } int mr_window2(_MIPD_ big x,big y,int i,int *nbs,int *nzs) { /* two bit window for double exponentiation */ int r,w; BOOL a,b,c,d; w=2; *nbs=1; *nzs=0; /* check for two leading 0's */ a=mr_testbit(_MIPP_ x,i); b=mr_testbit(_MIPP_ y,i); if (!a && !b) return 0; if (i<1) w=1; if (a) { if (b) r=3; else r=2; } else r=1; if (w==1) return r; c=mr_testbit(_MIPP_ x,i-1); d=mr_testbit(_MIPP_ y,i-1); if (!c && !d) { *nzs=1; return r; } *nbs=2; r*=4; if (c) { if (d) r+=3; else r+=2; } else r+=1; return r; } int mr_naf_window(_MIPD_ big x,big x3,int i,int *nbs,int *nzs,int store) { /* returns sliding window value, using fractional windows * * where "store" precomputed values are precalulated and * * stored. Scanning starts at the i-th bit of x. nbs is * * the number of bits processed. nzs is number of * * additional trailing zeros detected. x and x3 (which is * * 3*x) are combined to produce the NAF (non-adjacent * * form). So if x=11011(27) and x3 is 1010001, the LSB is * * ignored and the value 100T0T (32-4-1=27) processed, * * where T is -1. Note x.P = (3x-x)/2.P. This value will * * return +7, with nbs=4 and nzs=1, having stopped after * * the first 4 bits. If it goes too far, it must backtrack * * Note in an NAF non-zero elements are never side by side, * * so 10T10T won't happen. 
NOTE: return value n zero or * * odd, -21 <= n <= +21 */ int nb,j,r,biggest; /* get first bit */ nb=mr_testbit(_MIPP_ x3,i)-mr_testbit(_MIPP_ x,i); *nbs=1; *nzs=0; if (nb==0) return 0; if (i==0) return nb; biggest=2*store-1; if (nb>0) r=1; else r=(-1); for (j=i-1;j>0;j--) { (*nbs)++; r*=2; nb=mr_testbit(_MIPP_ x3,j)-mr_testbit(_MIPP_ x,j); if (nb>0) r+=1; if (nb<0) r-=1; if (abs(r)>biggest) break; } if (r%2!=0 && j!=0) { /* backtrack */ if (nb>0) r=(r-1)/2; if (nb<0) r=(r+1)/2; (*nbs)--; } while (r%2==0) { /* remove trailing zeros */ r/=2; (*nzs)++; (*nbs)--; } return r; } /* Some general purpose elliptic curve stuff */ BOOL point_at_infinity(epoint *p) { if (p==NULL) return FALSE; if (p->marker==MR_EPOINT_INFINITY) return TRUE; return FALSE; } #ifndef MR_STATIC epoint* epoint_init(_MIPDO_ ) { /* initialise epoint to general point at infinity. */ epoint *p; char *ptr; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return NULL; MR_IN(96) /* Create space for whole structure in one heap access */ p=(epoint *)mr_alloc(_MIPP_ mr_esize(mr_mip->nib-1),1); ptr=(char *)p+sizeof(epoint); p->X=mirvar_mem(_MIPP_ ptr,0); p->Y=mirvar_mem(_MIPP_ ptr,1); #ifndef MR_AFFINE_ONLY p->Z=mirvar_mem(_MIPP_ ptr,2); #endif p->marker=MR_EPOINT_INFINITY; MR_OUT return p; } #endif epoint* epoint_init_mem_variable(_MIPD_ char *mem,int index,int sz) { epoint *p; char *ptr; int offset,r; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif offset=0; r=(unsigned long)mem%MR_SL; if (r>0) offset=MR_SL-r; #ifndef MR_AFFINE_ONLY if (mr_mip->coord==MR_AFFINE) p=(epoint *)&mem[offset+index*mr_esize_a(sz)]; else #endif p=(epoint *)&mem[offset+index*mr_esize(sz)]; ptr=(char *)p+sizeof(epoint); p->X=mirvar_mem_variable(ptr,0,sz); p->Y=mirvar_mem_variable(ptr,1,sz); #ifndef MR_AFFINE_ONLY if (mr_mip->coord!=MR_AFFINE) p->Z=mirvar_mem_variable(ptr,2,sz); #endif p->marker=MR_EPOINT_INFINITY; return p; } epoint* epoint_init_mem(_MIPD_ char *mem,int index) { #ifdef MR_OS_THREADS 
miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return NULL; return epoint_init_mem_variable(_MIPP_ mem,index,mr_mip->nib-1); } #ifndef MR_STATIC /* allocate space for a number of epoints from the heap */ void *ecp_memalloc(_MIPD_ int num) { #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif #ifndef MR_AFFINE_ONLY if (mr_mip->coord==MR_AFFINE) return mr_alloc(_MIPP_ mr_ecp_reserve_a(num,mr_mip->nib-1),1); else #endif return mr_alloc(_MIPP_ mr_ecp_reserve(num,mr_mip->nib-1),1); } #endif void ecp_memkill(_MIPD_ char *mem,int num) { #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mem==NULL) return; #ifndef MR_AFFINE_ONLY if (mr_mip->coord==MR_AFFINE) memset(mem,0,mr_ecp_reserve_a(num,mr_mip->nib-1)); else #endif memset(mem,0,mr_ecp_reserve(num,mr_mip->nib-1)); #ifndef MR_STATIC mr_free(mem); #endif } #ifndef MR_STATIC void epoint_free(epoint *p) { /* clean up point */ if (p==NULL) return; zero(p->X); zero(p->Y); #ifndef MR_AFFINE_ONLY if (p->marker==MR_EPOINT_GENERAL) zero(p->Z); #endif mr_free(p); } #endif
GB_unaryop__minv_bool_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_bool_fp64 // op(A') function: GB_tran__minv_bool_fp64 // C type: bool // A type: double // cast: ; // unaryop: cij = true #define GB_ATYPE \ double #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = true ; // casting #define GB_CASTING(z, aij) \ ; ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_bool_fp64 ( bool *Cx, // Cx and Ax may be aliased double *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_bool_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
normal.c
/* ============================================================================= * * normal.c * -- Implementation of normal k-means clustering algorithm * * ============================================================================= * * Author: * * Wei-keng Liao * ECE Department, Northwestern University * email: wkliao@ece.northwestern.edu * * * Edited by: * * Jay Pisharath * Northwestern University. * * Chi Cao Minh * Stanford University * * ============================================================================= * * For the license of bayes/sort.h and bayes/sort.c, please see the header * of the files. * * ------------------------------------------------------------------------ * * For the license of kmeans, please see kmeans/LICENSE.kmeans * * ------------------------------------------------------------------------ * * For the license of ssca2, please see ssca2/COPYRIGHT * * ------------------------------------------------------------------------ * * For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the * header of the files. * * ------------------------------------------------------------------------ * * For the license of lib/rbtree.h and lib/rbtree.c, please see * lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree * * ------------------------------------------------------------------------ * * Unless otherwise noted, the following license applies to STAMP files: * * Copyright (c) 2007, Stanford University * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. 
* * * Neither the name of Stanford University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * ============================================================================= */ #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <float.h> #include <math.h> #include "common.h" #include "normal.h" #include "random.h" #include "thread.h" #include "timer.h" #include "tm.h" #include "util.h" double global_time = 0.0; typedef struct args { float** feature; int nfeatures; int npoints; int nclusters; int* membership; float** clusters; int** new_centers_len; float** new_centers; } args_t; float global_delta; int global_i; /* index into task queue */ #define CHUNK 3 /* ============================================================================= * work * ============================================================================= */ static void work (void* argPtr) { TM_THREAD_ENTER(); args_t* args = (args_t*)argPtr; float** feature = args->feature; int nfeatures = args->nfeatures; int npoints = args->npoints; int nclusters = args->nclusters; int* membership = args->membership; float** clusters = args->clusters; int** 
new_centers_len = args->new_centers_len; float** new_centers = args->new_centers; float delta = 0.0; int index; int i; int j; int start; int stop; int myId; myId = thread_getId(); start = myId * CHUNK; while (start < npoints) { stop = (((start + CHUNK) < npoints) ? (start + CHUNK) : npoints); for (i = start; i < stop; i++) { index = common_findNearestPoint(feature[i], nfeatures, clusters, nclusters); /* * If membership changes, increase delta by 1. * membership[i] cannot be changed by other threads */ if (membership[i] != index) { delta += 1.0; } /* Assign the membership to object i */ /* membership[i] can't be changed by other thread */ membership[i] = index; /* Update new cluster centers : sum of objects located within */ TM_BEGIN(); TM_SHARED_WRITE(*new_centers_len[index], TM_SHARED_READ(*new_centers_len[index]) + 1); for (j = 0; j < nfeatures; j++) { TM_SHARED_WRITE_F( new_centers[index][j], (TM_SHARED_READ_F(new_centers[index][j]) + feature[i][j]) ); } TM_END(); } /* Update task queue */ if (start + CHUNK < npoints) { TM_BEGIN(); start = (int)TM_SHARED_READ(global_i); TM_SHARED_WRITE(global_i, (start + CHUNK)); TM_END(); } else { break; } } TM_BEGIN(); TM_SHARED_WRITE_F(global_delta, TM_SHARED_READ_F(global_delta) + delta); TM_END(); TM_THREAD_EXIT(); } /* ============================================================================= * normal_exec * ============================================================================= */ float** normal_exec (int nthreads, float** feature, /* in: [npoints][nfeatures] */ int nfeatures, int npoints, int nclusters, float threshold, int* membership, random_t* randomPtr) /* out: [npoints] */ { int i; int j; int loop = 0; int** new_centers_len; /* [nclusters]: no. 
of points in each cluster */ float delta; float** clusters; /* out: [nclusters][nfeatures] */ float** new_centers; /* [nclusters][nfeatures] */ void* alloc_memory = NULL; args_t args; TIMER_T start; TIMER_T stop; /* Allocate space for returning variable clusters[] */ clusters = (float**)malloc(nclusters * sizeof(float*)); assert(clusters); clusters[0] = (float*)malloc(nclusters * nfeatures * sizeof(float)); assert(clusters[0]); for (i = 1; i < nclusters; i++) { clusters[i] = clusters[i-1] + nfeatures; } /* Randomly pick cluster centers */ for (i = 0; i < nclusters; i++) { int n = (int)(random_generate(randomPtr) % npoints); for (j = 0; j < nfeatures; j++) { clusters[i][j] = feature[n][j]; } } for (i = 0; i < npoints; i++) { membership[i] = -1; } /* * Need to initialize new_centers_len and new_centers[0] to all 0. * Allocate clusters on different cache lines to reduce false sharing. */ { int cluster_size = sizeof(int) + sizeof(float) * nfeatures; const int cacheLineSize = 32; cluster_size += (cacheLineSize-1) - ((cluster_size-1) % cacheLineSize); alloc_memory = calloc(nclusters, cluster_size); new_centers_len = (int**) malloc(nclusters * sizeof(int*)); new_centers = (float**) malloc(nclusters * sizeof(float*)); assert(alloc_memory && new_centers && new_centers_len); for (i = 0; i < nclusters; i++) { new_centers_len[i] = (int*)((char*)alloc_memory + cluster_size * i); new_centers[i] = (float*)((char*)alloc_memory + cluster_size * i + sizeof(int)); } } TIMER_READ(start); GOTO_SIM(); OSA_PRINT("entering parallel phase\n",0); START_INSTRUMENTATION(); do { delta = 0.0; args.feature = feature; args.nfeatures = nfeatures; args.npoints = npoints; args.nclusters = nclusters; args.membership = membership; args.clusters = clusters; args.new_centers_len = new_centers_len; args.new_centers = new_centers; global_i = nthreads * CHUNK; global_delta = delta; #ifdef OTM #pragma omp parallel { work(&args); } #else thread_start(work, &args); #endif delta = global_delta; /* Replace old 
cluster centers with new_centers */ for (i = 0; i < nclusters; i++) { for (j = 0; j < nfeatures; j++) { if (new_centers_len[i] > 0) { clusters[i][j] = new_centers[i][j] / *new_centers_len[i]; } new_centers[i][j] = 0.0; /* set back to 0 */ } *new_centers_len[i] = 0; /* set back to 0 */ } delta /= npoints; } while ((delta > threshold) && (loop++ < 500)); OSA_PRINT("exiting parallel phase\n",0); OSA_PRINT("END BENCHMARK kmeans-parallel-phase",0); STOP_INSTRUMENTATION(); GOTO_REAL(); TIMER_READ(stop); global_time += TIMER_DIFF_SECONDS(start, stop); free(alloc_memory); free(new_centers); free(new_centers_len); return clusters; } /* ============================================================================= * * End of normal.c * * ============================================================================= */
omp_parfor.c
#include <omp.h> #include <stdio.h> #include <time.h> double CLOCK() { struct timespec t; clock_gettime(CLOCK_MONOTONIC, &t); return (t.tv_sec * 1000) + (t.tv_nsec * 1e-6); } main(int argc, char **argv) { unsigned int i; double start, finish, total1, total2, total3; double a[1000000]; start = CLOCK(); // #pragma omp for for (i = 0; i < 1000000; i++) { a[i] = 2.0 * i; a[i] += (i % 3); } finish = CLOCK(); total1 = finish - start; start = CLOCK(); #pragma omp parallel for for (i = 0; i < 1000000; i++) { a[i] = 2.0 * i; a[i] += (i % 3); } finish = CLOCK(); total2 = finish - start; start = CLOCK(); for (i = 0; i < 1000000; i++) { a[i] = 2.0 * i; a[i] += (i % 3); } finish = CLOCK(); total3 = finish - start; //for (i=0; i<1000000; i++) // printf("The value for a[%li] = %f\n", i, a[i]); printf("Time for first loop = %f\n", total1); printf("Time for second loop = %f\n", total2); printf("Time for third loop = %f\n", total3); return 0; }
sieve.c
/*
 * Adapted from: http://w...content-available-to-author-only...s.org/sieve-of-eratosthenes
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <math.h>

/*
 * Count the primes in [2, n] with a parallel sieve of Eratosthenes.
 * Returns the number of primes, or -1 if the scratch buffer cannot be
 * allocated.
 *
 * Fixes vs. original:
 *  - the function-scope index "i" was shared between the threads of the
 *    outer parallel loop (only "p" is implicitly private), a data race;
 *    loop counters are now declared inside the loops.
 *  - the inner "#pragma omp parallel for" created a nested parallel region
 *    per outer iteration; the inner loop now runs within its own thread.
 *  - prime[] was leaked and malloc() was unchecked.
 *  - the float sqrt(n) bound is replaced by an exact integer bound, which
 *    also keeps the outer loop in OpenMP canonical form.
 */
int sieveOfEratosthenes(int n) {
    bool *prime = malloc((size_t)(n + 1) * sizeof *prime);
    if (prime == NULL)
        return -1;
    memset(prime, true, (size_t)(n + 1) * sizeof *prime);

    /* Largest p with p*p <= n, computed without floating point. */
    int limit = 0;
    while ((long long)(limit + 1) * (limit + 1) <= n)
        limit++;

    /* Benign race: concurrent iterations may redundantly clear the same
     * entry, but only ever from true to false, so the result is correct. */
#pragma omp parallel for schedule(dynamic)
    for (int p = 2; p <= limit; p++) {
        if (prime[p] == true) {
            /* Clear every multiple of p. */
            for (int i = p * 2; i <= n; i += p)
                prime[i] = false;
        }
    }

    /* Count surviving entries. */
    int primes = 0;
#pragma omp parallel for reduction(+:primes)
    for (int p = 2; p <= n; p++)
        if (prime[p])
            primes++;

    free(prime);
    return primes;
}

int main() {
    int n = 100000000;
    printf("%d\n", sieveOfEratosthenes(n));
    return 0;
}
DenseVector.h
//================================================================================================= /*! // \file blaze/math/smp/openmp/DenseVector.h // \brief Header file for the OpenMP-based dense vector SMP implementation // // Copyright (C) 2012-2018 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/ //================================================================================================= #ifndef _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_ #define _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_ //************************************************************************************************* // Includes //************************************************************************************************* #include <omp.h> #include "../../../math/Aliases.h" #include "../../../math/constraints/SMPAssignable.h" #include "../../../math/expressions/DenseVector.h" #include "../../../math/expressions/SparseVector.h" #include "../../../math/functors/AddAssign.h" #include "../../../math/functors/Assign.h" #include "../../../math/functors/DivAssign.h" #include "../../../math/functors/MultAssign.h" #include "../../../math/functors/SubAssign.h" #include "../../../math/simd/SIMDTrait.h" #include "../../../math/smp/ParallelSection.h" #include "../../../math/smp/SerialSection.h" #include "../../../math/typetraits/IsDenseVector.h" #include "../../../math/typetraits/IsSIMDCombinable.h" #include "../../../math/typetraits/IsSMPAssignable.h" #include "../../../math/views/Subvector.h" #include "../../../system/SMP.h" #include "../../../util/algorithms/Min.h" #include "../../../util/Assert.h" #include "../../../util/EnableIf.h" #include "../../../util/FunctionTrace.h" #include "../../../util/StaticAssert.h" #include "../../../util/Types.h" namespace blaze { //================================================================================================= // // OPENMP-BASED ASSIGNMENT KERNELS // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP (compound) assignment of a dense vector to a dense vector. 
// \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector to be assigned. // \param op The (compound) assignment operation. // \return void // // This function is the backend implementation of the OpenMP-based SMP assignment of a dense // vector to a dense vector.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side dense vector , bool TF2 // Transpose flag of the right-hand side dense vector , typename OP > // Type of the assignment operation void openmpAssign( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs, OP op ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); using ET1 = ElementType_t<VT1>; using ET2 = ElementType_t<VT2>; constexpr bool simdEnabled( VT1::simdEnabled && VT2::simdEnabled && IsSIMDCombinable_v<ET1,ET2> ); constexpr size_t SIMDSIZE( SIMDTrait< ElementType_t<VT1> >::size ); const bool lhsAligned( (~lhs).isAligned() ); const bool rhsAligned( (~rhs).isAligned() ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 
1UL : 0UL ); const size_t equalShare ( (~lhs).size() / threads + addon ); const size_t rest ( equalShare & ( SIMDSIZE - 1UL ) ); const size_t sizePerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t index( i*sizePerThread ); if( index >= (~lhs).size() ) continue; const size_t size( min( sizePerThread, (~lhs).size() - index ) ); if( simdEnabled && lhsAligned && rhsAligned ) { auto target( subvector<aligned>( ~lhs, index, size, unchecked ) ); const auto source( subvector<aligned>( ~rhs, index, size, unchecked ) ); op( target, source ); } else if( simdEnabled && lhsAligned ) { auto target( subvector<aligned>( ~lhs, index, size, unchecked ) ); const auto source( subvector<unaligned>( ~rhs, index, size, unchecked ) ); op( target, source ); } else if( simdEnabled && rhsAligned ) { auto target( subvector<unaligned>( ~lhs, index, size, unchecked ) ); const auto source( subvector<aligned>( ~rhs, index, size, unchecked ) ); op( target, source ); } else { auto target( subvector<unaligned>( ~lhs, index, size, unchecked ) ); const auto source( subvector<unaligned>( ~rhs, index, size, unchecked ) ); op( target, source ); } } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP (compound) assignment of a sparse vector to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be assigned. // \param op The (compound) assignment operation. // \return void // // This function is the backend implementation of the OpenMP-based SMP assignment of a sparse // vector to a dense vector.\n // This function must \b NOT be called explicitly! 
It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side sparse vector , bool TF2 // Transpose flag of the right-hand side sparse vector , typename OP > // Type of the assignment operation void openmpAssign( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs, OP op ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL ); const size_t sizePerThread( (~lhs).size() / threads + addon ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t index( i*sizePerThread ); if( index >= (~lhs).size() ) continue; const size_t size( min( sizePerThread, (~lhs).size() - index ) ); auto target( subvector<unaligned>( ~lhs, index, size, unchecked ) ); const auto source( subvector<unaligned>( ~rhs, index, size, unchecked ) ); op( target, source ); } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // PLAIN ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. 
// \param rhs The right-hand side vector to be assigned. // \return void // // This function implements the default OpenMP-based SMP assignment to a dense vector. Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); assign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be assigned. // \return void // // This function performs the OpenMP-based SMP assignment to a dense vector. 
Due to the // explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { assign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, Assign() ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // ADDITION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense vector. 
// \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be added. // \return void // // This function implements the default OpenMP-based SMP addition assignment to a dense vector. // Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); addAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be added. // \return void // // This function implements the OpenMP-based SMP addition assignment to a dense vector. 
Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { addAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, AddAssign() ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // SUBTRACTION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP subtraction assignment to a dense vector. 
// \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be subtracted. // \return void // // This function implements the default OpenMP-based SMP subtraction assignment of a vector to // a dense vector. Due to the explicit application of the SFINAE principle, this function can // only be selected by the compiler in case both operands are SMP-assignable and the element // types of both operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); subAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP subtraction assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be subtracted. // \return void // // This function implements the OpenMP-based SMP subtraction assignment to a dense vector. 
Due // to the explicit application of the SFINAE principle, this function can only be selected by // the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { subAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, SubAssign() ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // MULTIPLICATION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense vector. 
// \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be multiplied. // \return void // // This function implements the default OpenMP-based SMP multiplication assignment to a dense // vector. Due to the explicit application of the SFINAE principle, this function can only be // selected by the compiler in case both operands are SMP-assignable and the element types of // both operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); multAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP multiplication assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector to be multiplied. // \return void // // This function implements the OpenMP-based SMP multiplication assignment to a dense vector. 
// Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both // operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { multAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, MultAssign() ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // DIVISION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP division assignment to a dense vector. 
// \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector divisor. // \return void // // This function implements the default OpenMP-based SMP division assignment to a dense vector. // Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both // operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); divAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP division assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector divisor. // \return void // // This function implements the OpenMP-based SMP division assignment to a dense vector. 
Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { divAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, DivAssign() ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // COMPILE TIME CONSTRAINTS // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ namespace { BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE ); } /*! 
\endcond */ //************************************************************************************************* } // namespace blaze #endif
test9.c
/* Test fixture, apparently for an OpenMP barrier-matching/concurrency
 * analysis: the bare integer expression statements (0;, 2;, 3;, ...) are
 * no-op markers labelling program points, not real computations -- keep
 * them as-is. */

/* Contains a barrier on the statically-taken branch of a constant "if";
 * the else-branch (with a recursive call) is dead code by construction. */
void foo (int a)
{
  0;
  if (1) {
    2;
#pragma omp barrier
    3;
  }
  else {
    4;
    foo(3);
    5;
  }
}

/* Parallel region mixing a call that reaches a barrier (via foo) on one
 * branch with two explicit barriers on the other, plus one barrier that
 * every thread reaches after the "if". */
int main()
{
#pragma omp parallel
  {
    6;
    if (7) {
      8;
      foo(9);
      10;
    }
    else {
      11;
#pragma omp barrier
      12;
#pragma omp barrier
      13;
    }
    14;
#pragma omp barrier
    15;
  }
}
rose_shared.c
/*
 * dependence graph:
 */
#include "omp.h"

/* ROSE compiler test input: a trivially parallel loop whose array accesses
 * carry only a non-loop-carried anti dependence (see the analysis dump
 * below, which this fixture exists to produce).
 * NOTE(review): x is unused and a[] is read uninitialized -- this file is
 * analyzer input, not meant to be executed. */
void foo()
{
  int i;
  int x;
  int a[100];

#pragma omp parallel for private (i)
  for (i = 0; i <= 99; i += 1) {
    a[i] = a[i] + 1;
  }
}

/* non loop carried anti dependence for array accesses : level =1 > 0 dep
SgExprStatement:a[i] =((a[i]) + 1); SgExprStatement:a[i] =((a[i]) + 1);
1*1 ANTI_DEP; commonlevel = 1 CarryLevel = 1 Is precise
SgPntrArrRefExp:(a[i])@10:11->SgPntrArrRefExp:a[i]@10:9 == 0;||:: */
omp_private.c
/* This file shows the problems with private variables which its value is NOT * copied. It also shows how the "firstprivate" directive can help with that. * * Code adapted from Introduction to Parallel Programming by Peter Pacheco. */ #include <stdio.h> #include <stdlib.h> #include <omp.h> int main(int argc, char* argv[]) { int x = 5; int thread_count = 8; #pragma omp parallel \ num_threads(thread_count) \ private(x) { int my_rank = omp_get_thread_num(); printf("Thread %d > before initialization, x = %d\n", my_rank, x); x = 2 * my_rank + 2; printf("Thread %d > after initialization, x = %d\n", my_rank, x); } printf("After parallel block, x = %d\n", x); return 0; }
nary-search.c
#include <stdio.h>
#include <math.h>
#include <omp.h>

/* Interactive parallel n-ary search over a sorted array.
 *
 * Each round splits [left, right] with up to n+1 separator indices (computed
 * in parallel), then narrows the interval around the key.
 *
 * Fixes over the original:
 *  - int main(void): the original was "void main()" yet executed "return 0;"
 *    (ill-formed C — main must return int).
 *  - nsep tracks how many sep[] entries were actually written; the original
 *    comparison loop always ran i <= n and read uninitialized sep[i] when
 *    the "size <= n" branch had filled only sep[0..size-1].
 *  - when the loop converges to left == right the element is now compared
 *    before declaring it absent (the original reported "not present" even
 *    when array[left] == key).
 *  - input is validated against the fixed capacities (sep[20], array[20])
 *    to prevent out-of-bounds writes, and scanf results are checked.
 *  - tid is a per-iteration local: the original shared tid was written by
 *    all threads concurrently (data race).
 */
int main(void)
{
    int sep[20], array[20], key, i, n, left, right, size, interval,
        break_value = 0, nsep = 0;

    printf("Enter the size of array\n");
    if (scanf("%d", &size) != 1 || size < 1 || size > 20) {
        printf("Invalid array size\n");
        return 1;
    }
    printf("Enter the elements of array in ascending order\n");
    for (i = 0; i < size; i++) {
        if (scanf("%d", &array[i]) != 1) {
            printf("Invalid element\n");
            return 1;
        }
    }
    printf("Enter the key to be searched\n");
    if (scanf("%d", &key) != 1) {
        printf("Invalid key\n");
        return 1;
    }
    printf("Enter the value of n for n-ary search algorithm\n");
    /* sep[] holds up to n+1 separators, so n may be at most 19 */
    if (scanf("%d", &n) != 1 || n < 2 || n > 19) {
        printf("Invalid value of n\n");
        return 1;
    }

    left = 0;
    right = size - 1;
    if (key >= array[left] && key <= array[right]) {
        while (left != right) {
            /* (start) code to find separators */
            printf("left=%d, right=%d, size=%d\n", left, right, size);
            if (size <= n) {
                /* few enough elements: one separator per element */
                nsep = size;
#pragma omp parallel for num_threads(size)
                for (i = 0; i < size; i++) {
                    int tid;
                    sep[i] = left + i;
                    tid = omp_get_thread_num();
                    printf("Thread %d allocated sep[%d]=%d\n", tid, i, sep[i]);
                }
            } else {
                /* n+1 separators: endpoints plus n-1 evenly spaced cuts */
                nsep = n + 1;
                sep[0] = left;
                interval = ceil((float)size / (float)n);
#pragma omp parallel for num_threads(n - 1)
                for (i = 1; i <= n - 1; i++) {
                    int tid;
                    sep[i] = left + interval * i - 1;
                    tid = omp_get_thread_num();
                    printf("Thread %d allocated sep[%d]=%d\n", tid, i, sep[i]);
                }
                sep[n] = right;
            }
            /* (end) Code to find separators */

            /* (start) Code for comparison — only nsep entries are valid */
            for (i = 0; i < nsep; i++) {
                if (key == array[sep[i]]) {
                    printf("Element found at position %d\n", sep[i] + 1);
                    break_value = 1;
                    break;
                }
                if (key < array[sep[i]]) {
                    /* key lies between the previous and current separator */
                    right = sep[i];
                    if (i != 0)
                        left = 1 + sep[i - 1];
                    size = right - left + 1;
                    break;
                }
            }
            /* (end) Code for comparison */
            if (break_value == 1)
                break;
        } /* End of 'while' loop */

        if (break_value == 0) {
            /* converged to a single candidate: compare it before giving up */
            if (key == array[left])
                printf("Element found at position %d\n", left + 1);
            else
                printf("Element does not present in the list\n");
        }
    } else {
        printf("Element does not present in the list\n");
    }
    return 0;
} /* End of main() */

/* For compilation : gcc -fopenmp nary-search.c -lm
 * (-fopenmp enables OpenMP; -lm links the math library for ceil).
 * To run: ./a.out */
RCCE_lib.h
// // Copyright 2010 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #ifndef RCCE_LIB_H #define RCCE_LIB_H #include "RCCE.h" #if defined(_OPENMP) && !defined(__hermit__) #include <omp.h> #endif #include <string.h> //#define AIR #undef USE_FLAG_EXPERIMENTAL #undef USE_RCCE_COMM #undef USE_FAT_BARRIER #undef USE_PIPELINE_FLAGS #undef USE_PROBE_FLAGS #undef USE_TAGGED_FLAGS #undef USE_TAGGED_FOR_SHORT #undef USE_REVERTED_FLAGS #undef USE_REMOTE_PUT_LOCAL_GET #undef USE_PROBE_FLAGS_SHORTCUT #define USE_SYNCH_FOR_ZERO_BYTE // override certain settings for SCC-MPICH: //#include "scc-mpich-defs.h" // adjust settings automatically? #undef AUTO_ADJUST_SETTINGS //////////////////////////////////////////////////////////////////////////////////////////////// #ifdef AUTO_ADJUST_SETTINGS #ifdef SINGLEBITFLAGS #ifdef USE_TAGGED_FLAGS #warning TAGGED FLAGS CANNOT BE USED WITH SINGLEBITFLAGS! (#undef USE_TAGGED_FLAGS) #undef USE_TAGGED_FLAGS #undef USE_TAGGED_FOR_SHORT #undef USE_PROBE_FLAGS_SHORTCUT #endif #ifdef USE_FAT_BARRIER #warning FAT BARRIER CANNOT BE USED WITH SINGLEBITFLAGS! (#undef USE_FAT_BARRIER) #undef USE_FAT_BARRIER #endif #endif #ifdef USE_PROBE_FLAGS_SHORTCUT #ifndef USE_PROBE_FLAGS #warning THE PROBE FLAGS SHORTCUT REQUIRES PROBE FLAGS! (#define USE_PROBE_FLAGS) #define USE_PROBE_FLAGS #endif #ifndef USE_TAGGED_FOR_SHORT #warning THE PROBE FLAGS SHORTCUT REQUIRES TAGGED FLAGS! 
(#define USE_TAGGED_FLAGS) #define USE_TAGGED_FLAGS #endif #endif #ifdef USE_TAGGED_FOR_SHORT #ifndef USE_TAGGED_FLAGS #warning TAGGED SHORT MESSAGES REQUIRE TAGGED FLAGS! (#define USE_TAGGED_FLAGS) #define USE_TAGGED_FLAGS #endif #endif #ifdef USE_REMOTE_PUT_LOCAL_GET #ifndef USE_PROBE_FLAGS #warning PROBING FOR MESSAGES IN REMOTE-PUT/LOCAL-GET NEEDS ADDITIONAL PROBE FLAGS! (#define USE_PROBE_FLAGS) #define USE_PROBE_FLAGS #endif #endif #ifdef SCC_COUPLED_SYSTEMS #ifndef USE_REVERTED_FLAGS #ifdef USE_TAGGED_FLAGS #warning COUPLED SYSTEMS REQUIRE REVERTED FLAGS WHEN USING TAGGED FLAGS! (#define USE_REVERTED_FLAGS) #define USE_REVERTED_FLAGS #endif #endif #ifndef USE_REMOTE_PUT_LOCAL_GET #warning COUPLED SYSTEMS SHOULD USE REMOTE-PUT/LOCAL-GET! (#define USE_REMOTE_PUT_LOCAL_GET) #define USE_REMOTE_PUT_LOCAL_GET #endif #else #ifdef USE_PROBE_FLAGS #warning NON-COUPLED SYSTEMS SHOULD NOT USE ADDITIONAL PROBE FLAGS! (#undef USE_PROBE_FLAGS) #undef USE_PROBE_FLAGS #endif #endif #ifdef USE_PROBE_FLAGS #ifdef USE_FAT_BARRIER #warning PROBABLY TOO LITTLE MPB SPACE FOR USING FAT BARRIER WITH PROBE FLAGS ENABLED! (#undef USE_FAT_BARRIER) #undef USE_FAT_BARRIER #endif #endif //////////////////////////////////////////////////////////////////////////////////////////////// #else // !AUTO_ADJUST_SETTINGS #ifdef SINGLEBITFLAGS #ifdef USE_TAGGED_FLAGS #error TAGGED FLAGS CANNOT BE USED WITH SINGLEBITFLAGS! (#undef USE_TAGGED_FLAGS) #endif #undef USE_TAGGED_FLAGS #undef USE_TAGGED_FOR_SHORT #undef USE_PROBE_FLAGS_SHORTCUT #ifdef USE_FAT_BARRIER #error FAT BARRIER CANNOT BE USED WITH SINGLEBITFLAGS! (#undef USE_FAT_BARRIER) #endif #endif #ifdef USE_PROBE_FLAGS_SHORTCUT #ifndef USE_PROBE_FLAGS #error THE PROBE FLAGS SHORTCUT REQUIRES PROBE FLAGS! (#define USE_PROBE_FLAGS) #endif #ifndef USE_TAGGED_FOR_SHORT #error THE PROBE FLAGS SHORTCUT REQUIRES TAGGED FLAGS! 
(#define USE_TAGGED_FLAGS) #endif #endif #ifdef USE_TAGGED_FOR_SHORT #ifndef USE_TAGGED_FLAGS #error TAGGED SHORT MESSAGES REQUIRE TAGGED FLAGS! (#define USE_TAGGED_FLAGS) #endif #endif #ifdef USE_REMOTE_PUT_LOCAL_GET #ifndef USE_PROBE_FLAGS #warning PROBING FOR MESSAGES IN REMOTE-PUT/LOCAL-GET NEEDS ADDITIONAL PROBE FLAGS! (#define USE_PROBE_FLAGS) #endif #endif #ifdef SCC_COUPLED_SYSTEMS #ifdef USE_TAGGED_FLAGS #ifndef USE_REVERTED_FLAGS #error COUPLED SYSTEMS REQUIRE REVERTED FLAGS WHEN USING TAGGED FLAGS! (#define USE_REVERTED_FLAGS) #endif #endif #ifndef USE_REMOTE_PUT_LOCAL_GET #warning COUPLED SYSTEMS SHOULD USE REMOTE-PUT/LOCAL-GET! (#define USE_REMOTE_PUT_LOCAL_GET) #endif #else #ifdef USE_PROBE_FLAGS #warning NON-COUPLED SYSTEMS SHOULD NOT USE ADDITIONAL PROBE FLAGS! (#undef USE_PROBE_FLAGS) #endif #endif #ifdef USE_PROBE_FLAGS #ifdef USE_FAT_BARRIER #warning PROBABLY TOO LITTLE MPB SPACE FOR USING FAT BARRIER WITH PROBE FLAGS ENABLED! (#undef USE_FAT_BARRIER) #endif #endif #endif // !AUTO_ADJUST_SETTINGS //////////////////////////////////////////////////////////////////////////////////////////////// /* PAD32byte is used to compute a cacheline padded length of n (input) bytes */ #define PAD32byte(n) ((n)%32==0 ? 
(n) : (n) + 32 - (n)%32) //#define BITSPERCHAR 8 #define BOTH_IN_COMM_BUFFER 12 #define SOURCE_IN_PRIVATE_MEMORY 34 #define TARGET_IN_PRIVATE_MEMORY 56 #ifdef SINGLEBITFLAGS #define RCCE_FLAGS_PER_BYTE 8 #else #define RCCE_FLAGS_PER_BYTE 1 #endif #define RCCE_FLAGS_PER_LINE (RCCE_LINE_SIZE*RCCE_FLAGS_PER_BYTE) #define RCCE_SUM_INT (RCCE_SUM+(RCCE_NUM_OPS)*(RCCE_INT)) #define RCCE_SUM_LONG (RCCE_SUM+(RCCE_NUM_OPS)*(RCCE_LONG)) #define RCCE_SUM_FLOAT (RCCE_SUM+(RCCE_NUM_OPS)*(RCCE_FLOAT)) #define RCCE_SUM_DOUBLE (RCCE_SUM+(RCCE_NUM_OPS)*(RCCE_DOUBLE)) #define RCCE_MAX_INT (RCCE_MAX+(RCCE_NUM_OPS)*(RCCE_INT)) #define RCCE_MAX_LONG (RCCE_MAX+(RCCE_NUM_OPS)*(RCCE_LONG)) #define RCCE_MAX_FLOAT (RCCE_MAX+(RCCE_NUM_OPS)*(RCCE_FLOAT)) #define RCCE_MAX_DOUBLE (RCCE_MAX+(RCCE_NUM_OPS)*(RCCE_DOUBLE)) #define RCCE_MIN_INT (RCCE_MIN+(RCCE_NUM_OPS)*(RCCE_INT)) #define RCCE_MIN_LONG (RCCE_MIN+(RCCE_NUM_OPS)*(RCCE_LONG)) #define RCCE_MIN_FLOAT (RCCE_MIN+(RCCE_NUM_OPS)*(RCCE_FLOAT)) #define RCCE_MIN_DOUBLE (RCCE_MIN+(RCCE_NUM_OPS)*(RCCE_DOUBLE)) #define RCCE_PROD_INT (RCCE_PROD+(RCCE_NUM_OPS)*(RCCE_INT)) #define RCCE_PROD_LONG (RCCE_PROD+(RCCE_NUM_OPS)*(RCCE_LONG)) #define RCCE_PROD_FLOAT (RCCE_PROD+(RCCE_NUM_OPS)*(RCCE_FLOAT)) #define RCCE_PROD_DOUBLE (RCCE_PROD+(RCCE_NUM_OPS)*(RCCE_DOUBLE)) #define RCCE_COMM_INITIALIZED 45328976 #define RCCE_COMM_NOT_INITIALIZED -45328976 // auxiliary MPB pointer type typedef volatile unsigned int* t_vintp; // Also need dereferenced types typedef volatile unsigned char t_vchar; typedef volatile unsigned int t_vint; typedef struct rcce_block { t_vcharp space; // pointer to space for data in block size_t free_size; // actual free space in block (0 or whole block) size_t size; // size of an allocated block struct rcce_block *next; // pointer to next block in circular linked list } RCCE_BLOCK; #if defined(SINGLEBITFLAGS) || defined(USE_BYTE_FLAGS) typedef struct rcce_flag_line { char flag[RCCE_FLAGS_PER_LINE]; t_vcharp line_address; int members; 
struct rcce_flag_line *next; } RCCE_FLAG_LINE; #endif typedef struct { RCCE_BLOCK *tail; // "last" block in linked list of blocks } RCCE_BLOCK_S; #ifdef AIR #define FPGA_BASE 0xf9000000 #define BACKOFF_MIN 8 #define BACKOFF_MAX 256 typedef volatile struct _RCCE_AIR { int * counter; int * init; } RCCE_AIR; #endif #ifndef GORY extern RCCE_FLAG RCCE_sent_flag[RCCE_MAXNP]; extern RCCE_FLAG RCCE_ready_flag[RCCE_MAXNP]; #ifdef USE_PIPELINE_FLAGS extern RCCE_FLAG RCCE_sent_flag_pipe[RCCE_MAXNP]; extern RCCE_FLAG RCCE_ready_flag_pipe[RCCE_MAXNP]; #endif #ifdef USE_PROBE_FLAGS extern RCCE_FLAG RCCE_probe_flag[RCCE_MAXNP]; #endif extern t_vcharp RCCE_buff_ptr; extern size_t RCCE_chunk; extern t_vcharp RCCE_flags_start; #ifndef USE_REMOTE_PUT_LOCAL_GET extern RCCE_SEND_REQUEST* RCCE_send_queue; extern RCCE_RECV_REQUEST* RCCE_recv_queue[RCCE_MAXNP]; #else extern RCCE_SEND_REQUEST* RCCE_send_queue[RCCE_MAXNP]; extern RCCE_RECV_REQUEST* RCCE_recv_queue; #endif #endif //#ifdef USE_FLAG_EXPERIMENTAL extern t_vcharp RCCE_flag_buffer[RCCE_MAXNP]; //#endif #ifndef __hermit__ extern t_vcharp RCCE_fool_write_combine_buffer; #endif extern t_vcharp RCCE_comm_buffer[RCCE_MAXNP]; extern int RCCE_NP; extern int RCCE_BUFF_SIZE; #ifndef COPPERRIDGE extern omp_lock_t RCCE_corelock[RCCE_MAXNP]; extern t_vchar RC_comm_buffer[RCCE_MAXNP*RCCE_BUFF_SIZE_MAX]; extern t_vchar RC_shm_buffer[RCCE_SHM_SIZE_MAX]; #endif extern int RC_MY_COREID; extern int RC_COREID[RCCE_MAXNP]; extern double RC_REFCLOCKGHZ; extern int RCCE_IAM; extern int RCCE_debug_synch; extern int RCCE_debug_comm; extern int RCCE_debug_debug; extern int RCCE_debug_RPC; #ifdef SINGLEBITFLAGS extern RCCE_FLAG_LINE RCCE_flags; extern int WORDSIZE; extern int LEFTMOSTBIT; RCCE_FLAG_STATUS RCCE_bit_value(t_vcharp, int); RCCE_FLAG_STATUS RCCE_flip_bit_value(t_vcharp, int); int RCCE_write_bit_value(t_vcharp, int, RCCE_FLAG_STATUS); #endif extern int RCCE_comm_init_val; void RCCE_malloc_init(t_vcharp, size_t); void 
RCCE_shmalloc_init(t_vcharp, size_t); int RCCE_qsort(char *, size_t, size_t, int (*)(const void*, const void*)); int id_compare(const void *, const void *); #if 0 int RCCE_probe(RCCE_FLAG); #endif int RCCE_error_return(int, int); #ifdef __hermit__ #define RC_cache_invalidate() {} #else void RC_cache_invalidate(void); #endif int RCCE_acquire_treelock(RCCE_COMM*); int RCCE_release_treelock(RCCE_COMM*); int RCCE_TNS_barrier(RCCE_COMM*); int RCCE_acquire_lock(int); int RCCE_try_lock(int); int RCCE_backoff_lock(int); int RCCE_release_lock(int); int RCCE_global_color(int, void *); t_vcharp RC_COMM_BUFFER_START(int); //#ifdef USE_FLAG_EXPERIMENTAL t_vcharp RC_FLAG_BUFFER_START(int); //#endif #ifndef GORY t_vcharp RCCE_malloc(size_t); t_vcharp RCCE_malloc_request(size_t, size_t *); t_vcharp RCCE_palloc(size_t, int); void RCCE_free(t_vcharp); int RCCE_put(t_vcharp, t_vcharp, int, int); int RCCE_get(t_vcharp, t_vcharp, int, int); int RCCE_wait_until(RCCE_FLAG, RCCE_FLAG_STATUS); int RCCE_test_flag(RCCE_FLAG, RCCE_FLAG_STATUS, int *); int RCCE_flag_alloc(RCCE_FLAG *); int RCCE_flag_free(RCCE_FLAG *); int RCCE_flag_write(RCCE_FLAG *, RCCE_FLAG_STATUS, int); int RCCE_flag_read(RCCE_FLAG, RCCE_FLAG_STATUS *, int); #ifdef USE_FLAG_EXPERIMENTAL int RCCE_put_flag(t_vcharp, t_vcharp, int, int); int RCCE_get_flag(t_vcharp, t_vcharp, int, int); #endif #ifdef USE_TAGGED_FLAGS int RCCE_flag_write_tagged(RCCE_FLAG *, RCCE_FLAG_STATUS, int, void*, int); int RCCE_flag_read_tagged(RCCE_FLAG, RCCE_FLAG_STATUS *, int, void*, int); int RCCE_wait_tagged(RCCE_FLAG, RCCE_FLAG_STATUS, void *, int); int RCCE_test_tagged(RCCE_FLAG, RCCE_FLAG_STATUS, int *, void *, int); #endif #endif #if defined(_OPENMP) && !defined(__hermit__) #pragma omp threadprivate (RC_COREID, RC_MY_COREID, RC_REFCLOCKGHZ) #pragma omp threadprivate (RCCE_comm_buffer) #pragma omp threadprivate (RCCE_BUFF_SIZE) #pragma omp threadprivate (RCCE_IAM, RCCE_NP) #pragma omp threadprivate (RCCE_debug_synch, RCCE_debug_comm, 
RCCE_debug_debug) #ifdef SINGLEBITFLAGS #pragma omp threadprivate (RCCE_flags, WORDSIZE, LEFTMOSTBIT) #endif #ifndef GORY #pragma omp threadprivate (RCCE_send_queue, RCCE_recv_queue) #pragma omp threadprivate (RCCE_sent_flag, RCCE_ready_flag) #ifdef USE_PROBE_FLAGS #pragma omp threadprivate (RCCE_probe_flag) #endif #ifdef USE_PIPELINE_FLAGS #pragma omp threadprivate (RCCE_sent_flag_pipe, RCCE_ready_flag_pipe) #endif #pragma omp threadprivate (RCCE_buff_ptr, RCCE_chunk) #pragma omp threadprivate (RCCE_flags_start) #endif #endif #ifdef SHMADD unsigned int getCOREID(); unsigned int readTILEID(); unsigned int readLUT(unsigned int); void writeLUT(unsigned int, unsigned int); #endif #endif
ast-dump-openmp-section.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test() { #pragma omp sections { #pragma omp section ; } } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-section.c:3:1, line:9:1> line:3:6 test 'void ()' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:9:1> // CHECK-NEXT: `-OMPSectionsDirective {{.*}} <line:4:1, col:21> // CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3, line:8:3> // CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: |-CompoundStmt {{.*}} <line:5:3, line:8:3> // CHECK-NEXT: | `-OMPSectionDirective {{.*}} <line:6:1, col:20> // CHECK-NEXT: | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-section.c:4:1) *const restrict'
4-3t.c
#include <stdio.h>
#include <omp.h>

/* Requests a team of 3 threads and distributes 16 loop iterations among
 * them, printing which thread handles which iteration (order is
 * nondeterministic), then prints a farewell after the team is destroyed. */
int main()
{
    omp_set_num_threads(3);

#pragma omp parallel for
    for (int iteration = 0; iteration < 16; iteration++) {
        printf("Hello from thread number: %d Iteration: %d \n",
               omp_get_thread_num(), iteration);
    }

    printf("\n GoodBye – Team Destroyed – Exiting Program \n\n");
}
exercise7.c
/* * BSD 2-Clause License * * Copyright (c) 2020, Alessandro Capotondi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file exercise7.c * @author Alessandro Capotondi * @date 27 Mar 2020 * @brief Exercise 7 * * @see https://dolly.fim.unimore.it/2019/course/view.php?id=152 */ #include <stdio.h> #include <omp.h> #include "utils.h" #if !defined(W) #define W (1000) #endif #if !defined(T) #define T (20) #endif /** * @brief EX 7 - Task Parallelism w/tasks * * a) Parallelize with TASK directive. * b) Parallelize with SINGLE NOWAIT directive. * c) Compare Results with a for loop. 
 * @return void
 */
void exercise()
{
    /* shared loop counter; each enabled variant iterates 16384 times */
    unsigned int i;

    /* Variant (a): one thread (single) creates 16384 tasks; the team's idle
     * threads execute them. nowait lets the single thread keep generating
     * tasks without an implicit barrier after the single region. */
#if 1
#pragma omp parallel
#pragma omp single nowait
    for (i = 0; i < 16384; i++)
    {
#pragma omp task
        {
            /* NOTE(review): %hu expects unsigned short but the arguments are
             * int/unsigned int — format mismatch; TODO confirm DEBUG_PRINT's
             * intent before changing it */
            DEBUG_PRINT("%hu: I am executing iteration %hu!\n", omp_get_thread_num(), i);
            work(W);
        }
    }
#endif

    /* Variant (b), disabled: every thread runs the whole loop (plain for
     * inside parallel is replicated, not work-shared); single nowait then
     * elects one executor per iteration's single region. Shown for
     * comparison in the exercise. */
#if 0
#pragma omp parallel
    for (i = 0; i < 16384; i++)
    {
#pragma omp single nowait
        {
            DEBUG_PRINT("%hu: I am executing iteration %hu!\n", omp_get_thread_num(), i);
            work(W);
        }
    }
#endif

    /* Variant (c), disabled: conventional work-sharing parallel for, the
     * baseline the task versions are compared against. */
#if 0
#pragma omp parallel for
    for (i = 0; i < 16384; i++)
    {
        DEBUG_PRINT("%hu: I am executing iteration %hu!\n", omp_get_thread_num(), i);
        work(W);
    }
#endif
}
mass_matrix.h
/**
 * @author : Zhao Chonyyao (cyzhao@zju.edu.cn)
 * @date : 2021-04-30
 * @description: mass matrix for finite element method.
 * @version : 1.0
 */
#ifndef MASS_MATRIX_H
#define MASS_MATRIX_H
#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <iostream>
#include <vector>
#include "Common/eigen_ext.h"
namespace PhysIKA {
// using mati_t=zjucad::matrix::matrix<size_t>;
// using matd_t=zjucad::matrix::matrix<double>;
// using spmat_t=Eigen::SparseMatrix<double>;
// int calc_mass_matrix(const mati_t &cell,
//                      const matd_t &nods,
//                      const double rho,
//                      const size_t dim,
//                      spmat_t *M,
//                      bool lumped);
// int calc_surf_mass_matrix(const mati_t &cell, const matd_t &nods,
//                           const double rho, spmat_t *M);
//TODO: integrate mass with basis and quadrature

/// Primary (fallback) template: selected only when no specialization fits.
/// NOTE(review): declared to return int but has no return statement — using
/// its result is undefined behavior; TODO confirm this overload is never
/// actually instantiated with its result consumed.
template <typename T, size_t dim_, size_t num_per_cell_>
int calc_mass_vector(const Eigen::Matrix<T, dim_, -1>& nods,
                     const Eigen::Matrix<size_t, num_per_cell_, -1>& cells,
                     const T& rho,
                     Eigen::Matrix<T, -1, 1>& mass_vector)
{
    std::cout << "use partial specilization" << std::endl;
}

/// Lumped mass vector for linear tetrahedra (3D nodes, 4 nodes per cell):
/// each tet's mass rho*|det|/6 is split equally (1/4) onto its four nodes.
/// @param nods  3 x #nodes coordinate matrix
/// @param cells node-index matrix, one column per tetrahedron
/// @param rho   mass density
/// @param mass_vector  output, one lumped mass per node
/// @return 0 on success
template <typename T>
int calc_mass_vector(const Eigen::Matrix<T, 3, -1>& nods,
                     const Eigen::MatrixXi& cells,
                     const T& rho,
                     Eigen::Matrix<T, -1, 1>& mass_vector)
{
    const size_t num_nods = nods.cols();
    const size_t dim = nods.rows();
    const Eigen::Matrix<int, 3, 1> all_rows = Eigen::Matrix<int, 3, 1>::LinSpaced(dim, 0, dim - 1);
    mass_vector.resize(num_nods);
    mass_vector.setZero();
    for (size_t cell_id = 0; cell_id < cells.cols(); ++cell_id)
    {
        // gather the 4 node positions of this tet (indexing from Common/eigen_ext.h)
        Eigen::Matrix<T, 3, 4> one_tet_ = indexing(nods, all_rows, cells.col(cell_id));
        // edge matrix relative to the 4th vertex; |det|/6 is the tet volume
        Eigen::Matrix<T, 3, 3> one_cell = one_tet_.block(0, 0, 3, 3) - one_tet_.col(3) * Eigen::Matrix<T, 1, 3>::Ones();
        T volume = fabs(one_cell.determinant()) / 6.0;
        T coeff = rho * volume / 4.0;  // equal share per vertex (mass lumping)
        for (size_t p_id = 0; p_id < cells.rows(); ++p_id)
            mass_vector(cells(p_id, cell_id)) += coeff;
    }
    return 0;
}

/// Quadrature-based lumped mass vector for general elements.
/// BASIS supplies shape-function derivatives, QDRT the quadrature points
/// (PNT_) and weights (WGT_). The per-cell mass is integrated numerically,
/// scattered into a sparse matrix, and the diagonal is read back out.
/// NOTE(review): the first loop uses an unsigned (size_t) index under
/// "#pragma omp parallel for", which requires OpenMP >= 3.0; the push_backs
/// into the shared `trips` are serialized by the critical section.
template <typename T, size_t dim_, size_t num_per_cell_, size_t bas_order_, size_t num_qdrt_,
          template <typename, size_t, size_t, size_t, size_t> class BASIS,  // basis
          template <typename, size_t, size_t, size_t> class QDRT>           //
int mass_calculator(const Eigen::Matrix<T, dim_, -1>& nods,
                    const Eigen::Matrix<int, num_per_cell_, -1>& cells,
                    const T& rho,
                    Eigen::Matrix<T, -1, 1>& mass_vector)
{
    using basis = BASIS<T, dim_, 1, bas_order_, num_per_cell_>;
    using qdrt = QDRT<T, dim_, num_qdrt_, num_per_cell_>;
    const size_t num_cells = cells.cols(), num_nods = nods.cols();
    const Eigen::Matrix<int, dim_, 1> all_rows_ = Eigen::Matrix<int, dim_, 1>::LinSpaced(dim_, 0, dim_ - 1);
    const qdrt quadrature_ = qdrt();
    mass_vector = Eigen::Matrix<T, -1, 1>(num_nods);
    mass_vector.setZero();
    std::vector<Eigen::Triplet<T>> trips;
#pragma omp parallel for
    for (size_t cell_id = 0; cell_id < num_cells; ++cell_id)
    {
        const Eigen::Matrix<T, dim_, num_per_cell_> X_cell = indexing(nods, all_rows_, cells.col(cell_id));
        T mass = 0.0;
        // numerical integration: sum_q w_q * |J(q)| over the quadrature rule
        for (size_t qdrt_id = 0; qdrt_id < quadrature_.WGT_.size(); ++qdrt_id)
        {
            Eigen::Matrix<T, num_per_cell_, dim_> Dphi_Dxi_tmp;
            Eigen::Matrix<T, dim_, dim_> Dm_inv_tmp;
            T jac_det_tmp;
            basis::calc_Dphi_Dxi(quadrature_.PNT_.col(qdrt_id), X_cell.data(), Dphi_Dxi_tmp);
            basis::calc_InvDm_Det(Dphi_Dxi_tmp, X_cell.data(), jac_det_tmp, Dm_inv_tmp);
            mass += quadrature_.WGT_[qdrt_id] * jac_det_tmp;
        }
        mass *= rho / num_per_cell_;  // distribute the cell mass over its nodes
        for (size_t p = 0; p < cells.rows(); ++p)
            for (size_t q = p; q < cells.rows(); ++q)
            {
#pragma omp critical
                {
                    trips.push_back(Eigen::Triplet<T>(cells(p, cell_id), cells(q, cell_id), mass));
                    trips.push_back(Eigen::Triplet<T>(cells(q, cell_id), cells(p, cell_id), mass));
                }
            }
    }
    // collapse every triplet onto the diagonal (row,row) so setFromTriplets'
    // duplicate summation yields a lumped (diagonal) mass
#pragma omp parallel for
    for (size_t i = 0; i < trips.size(); ++i)
    {
        trips[i] = Eigen::Triplet<T>(trips[i].row(), trips[i].row(), trips[i].value());
    }
    Eigen::SparseMatrix<T> mass(num_nods, num_nods);
    mass.reserve(trips.size());
    mass.setFromTriplets(trips.begin(), trips.end());
#pragma omp parallel for
    for (size_t i = 0; i < num_nods; ++i)
    {
        mass_vector[i] = mass.coeff(i, i);
    }
    return 0;
}
}  // namespace PhysIKA
#endif
8240.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <killian@udel.edu>
 *
 * Copyright 2013, The University of Delaware
 */
/* NOTE(review): this is an AUTOTUNING TEMPLATE, not compilable C — the
 * "#P9", "#P11", "#P12" and "#p" tokens in kernel_conv2d are placeholders a
 * tuning framework substitutes with concrete clause values before building.
 * Do not attempt to compile this file directly. */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"

/* Array initialization: fills A with a deterministic (i+j)/nj pattern. */
static
void init_array (int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  // printf("Initializing Array\n");
  int i, j;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
        A[i][j] = ((DATA_TYPE) (i + j) / nj);
      }
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
        fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
        if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
      }
  fprintf(stderr, "\n");
}

/* Main computational kernel: 3x3 stencil over the interior of A into B.
   The whole function will be timed, including the call and return.
   The pragmas below contain tuning placeholders (#P…/#p) — see file note. */
static
void kernel_conv2d(int ni, int nj,
                   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
#pragma omp parallel for private(i, j) collapse(#P12) schedule(#P9, #P11) num_threads(#P11)
#pragma omp target teams distribute #p #p
  for (i = 1; i < _PB_NI - 1; ++i)
    {
#pragma omp
      for (j = 1; j < _PB_NJ - 1; ++j)
        {
          B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
                  + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
                  + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
        }
    }
#pragma endscop
  // printf("Kernal computation complete !!\n");
}

int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
laplace2d.c
/* * Copyright 2012 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <math.h> #include <string.h> #include <openacc.h> #include "timer.h" #define NN 4096 #define NM 4096 double A[NN][NM]; double Anew[NN][NM]; int main(int argc, char** argv) { const int n = NN; const int m = NM; const int iter_max = 1000; const double tol = 1.0e-6; double error = 1.0; memset(A, 0, n * m * sizeof(double)); memset(Anew, 0, n * m * sizeof(double)); for (int j = 0; j < n; j++) { A[j][0] = 1.0; Anew[j][0] = 1.0; } printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m); StartTimer(); int iter = 0; while ( error > tol && iter < iter_max ) { error = 0.0; #pragma omp parallel for shared(m, n, Anew, A) #pragma acc kernels for( int j = 1; j < n-1; j++) { for( int i = 1; i < m-1; i++ ) { Anew[j][i] = 0.25 * ( A[j][i+1] + A[j][i-1] + A[j-1][i] + A[j+1][i]); error = fmax( error, fabs(Anew[j][i] - A[j][i])); } } #pragma omp parallel for shared(m, n, Anew, A) #pragma acc kernels for( int j = 1; j < n-1; j++) { for( int i = 1; i < m-1; i++ ) { A[j][i] = Anew[j][i]; } } if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error); iter++; } double runtime = GetTimer(); printf(" total: %f s\n", runtime / 1000); }
ASTMatchers.h
//===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements matchers to be used together with the MatchFinder to // match AST nodes. // // Matchers are created by generator functions, which can be combined in // a functional in-language DSL to express queries over the C++ AST. // // For example, to match a class with a certain name, one would call: // cxxRecordDecl(hasName("MyClass")) // which returns a matcher that can be used to find all AST nodes that declare // a class named 'MyClass'. // // For more complicated match expressions we're often interested in accessing // multiple parts of the matched AST nodes once a match is found. In that case, // use the id(...) matcher around the match expressions that match the nodes // you want to access. // // For example, when we're interested in child classes of a certain class, we // would write: // cxxRecordDecl(hasName("MyClass"), has(id("child", recordDecl()))) // When the match is found via the MatchFinder, a user provided callback will // be called with a BoundNodes instance that contains a mapping from the // strings that we provided for the id(...) calls to the nodes that were // matched. // In the given example, each time our matcher finds a match we get a callback // where "child" is bound to the RecordDecl node of the matching child // class declaration. // // See ASTMatchersInternal.h for a more in-depth explanation of the // implementation details of the matcher framework. // // See ASTMatchFinder.h for how to use the generated matchers to run over // an AST. 
// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #include "clang/AST/ASTContext.h" #include "clang/AST/ASTTypeTraits.h" #include "clang/AST/Attr.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclFriend.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/OperationKinds.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" #include "clang/ASTMatchers/ASTMatchersInternal.h" #include "clang/ASTMatchers/ASTMatchersMacros.h" #include "clang/Basic/AttrKinds.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TypeTraits.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Regex.h" #include <cassert> #include <cstddef> #include <iterator> #include <limits> #include <string> #include <utility> #include <vector> namespace clang { namespace ast_matchers { /// Maps string IDs to AST nodes matched by parts of a matcher. /// /// The bound nodes are generated by calling \c bind("id") on the node matchers /// of the nodes we want to access later. 
/// /// The instances of BoundNodes are created by \c MatchFinder when the user's /// callbacks are executed every time a match is found. class BoundNodes { public: /// Returns the AST node bound to \c ID. /// /// Returns NULL if there was no node bound to \c ID or if there is a node but /// it cannot be converted to the specified type. template <typename T> const T *getNodeAs(StringRef ID) const { return MyBoundNodes.getNodeAs<T>(ID); } /// Type of mapping from binding identifiers to bound nodes. This type /// is an associative container with a key type of \c std::string and a value /// type of \c clang::ast_type_traits::DynTypedNode using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap; /// Retrieve mapping from binding identifiers to bound nodes. const IDToNodeMap &getMap() const { return MyBoundNodes.getMap(); } private: friend class internal::BoundNodesTreeBuilder; /// Create BoundNodes from a pre-filled map of bindings. BoundNodes(internal::BoundNodesMap &MyBoundNodes) : MyBoundNodes(MyBoundNodes) {} internal::BoundNodesMap MyBoundNodes; }; /// If the provided matcher matches a node, binds the node to \c ID. /// /// FIXME: Do we want to support this now that we have bind()? template <typename T> internal::Matcher<T> id(StringRef ID, const internal::BindableMatcher<T> &InnerMatcher) { return InnerMatcher.bind(ID); } /// Types of matchers for the top-level classes in the AST class /// hierarchy. /// @{ using DeclarationMatcher = internal::Matcher<Decl>; using StatementMatcher = internal::Matcher<Stmt>; using TypeMatcher = internal::Matcher<QualType>; using TypeLocMatcher = internal::Matcher<TypeLoc>; using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>; using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>; using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>; /// @} /// Matches any node. /// /// Useful when another matcher requires a child matcher, but there's no /// additional constraint. 
This will often be used with an explicit conversion /// to an \c internal::Matcher<> type such as \c TypeMatcher. /// /// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g., /// \code /// "int* p" and "void f()" in /// int* p; /// void f(); /// \endcode /// /// Usable as: Any Matcher inline internal::TrueMatcher anything() { return internal::TrueMatcher(); } /// Matches the top declaration context. /// /// Given /// \code /// int X; /// namespace NS { /// int Y; /// } // namespace NS /// \endcode /// decl(hasDeclContext(translationUnitDecl())) /// matches "int X", but not "int Y". extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl> translationUnitDecl; /// Matches typedef declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefDecl() /// matches "typedef int X", but not "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl> typedefDecl; /// Matches typedef name declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefNameDecl() /// matches "typedef int X" and "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl> typedefNameDecl; /// Matches type alias declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typeAliasDecl() /// matches "using Y = int", but not "typedef int X" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl> typeAliasDecl; /// Matches type alias template declarations. /// /// typeAliasTemplateDecl() matches /// \code /// template <typename T> /// using Y = X<T>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl> typeAliasTemplateDecl; /// Matches AST nodes that were expanded within the main-file. 
/// /// Example matches X but not Y /// (matcher = cxxRecordDecl(isExpansionInMainFile()) /// \code /// #include <Y.h> /// class X {}; /// \endcode /// Y.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInMainFile, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); return SourceManager.isInMainFile( SourceManager.getExpansionLoc(Node.getBeginLoc())); } /// Matches AST nodes that were expanded within system-header-files. /// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInSystemHeader()) /// \code /// #include <SystemHeader.h> /// class X {}; /// \endcode /// SystemHeader.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } return SourceManager.isInSystemHeader(ExpansionLoc); } /// Matches AST nodes that were expanded within files whose name is /// partially matching a given regex. 
/// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")) /// \code /// #include "ASTMatcher.h" /// class X {}; /// \endcode /// ASTMatcher.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER_P(isExpansionInFileMatching, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), std::string, RegExp) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } auto FileEntry = SourceManager.getFileEntryForID(SourceManager.getFileID(ExpansionLoc)); if (!FileEntry) { return false; } auto Filename = FileEntry->getName(); llvm::Regex RE(RegExp); return RE.match(Filename); } /// Matches declarations. /// /// Examples matches \c X, \c C, and the friend declaration inside \c C; /// \code /// void X(); /// class C { /// friend X; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<Decl> decl; /// Matches a declaration of a linkage specification. /// /// Given /// \code /// extern "C" {} /// \endcode /// linkageSpecDecl() /// matches "extern "C" {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl> linkageSpecDecl; /// Matches a declaration of anything that could have a name. /// /// Example matches \c X, \c S, the anonymous union type, \c i, and \c U; /// \code /// typedef int X; /// struct S { /// union { /// int i; /// } U; /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl; /// Matches a declaration of label. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelDecl() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl; /// Matches a declaration of a namespace. 
/// /// Given /// \code /// namespace {} /// namespace test {} /// \endcode /// namespaceDecl() /// matches "namespace {}" and "namespace test {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl> namespaceDecl; /// Matches a declaration of a namespace alias. /// /// Given /// \code /// namespace test {} /// namespace alias = ::test; /// \endcode /// namespaceAliasDecl() /// matches "namespace alias" but not "namespace test" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl> namespaceAliasDecl; /// Matches class, struct, and union declarations. /// /// Example matches \c X, \c Z, \c U, and \c S /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl; /// Matches C++ class declarations. /// /// Example matches \c X, \c Z /// \code /// class X; /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl> cxxRecordDecl; /// Matches C++ class template declarations. /// /// Example matches \c Z /// \code /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl> classTemplateDecl; /// Matches C++ class template specializations. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// \endcode /// classTemplateSpecializationDecl() /// matches the specializations \c A<int> and \c A<double> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplateSpecializationDecl> classTemplateSpecializationDecl; /// Matches C++ class template partial specializations. 
/// /// Given /// \code /// template<class T1, class T2, int I> /// class A {}; /// /// template<class T, int I> /// class A<T, T*, I> {}; /// /// template<> /// class A<int, int, 1> {}; /// \endcode /// classTemplatePartialSpecializationDecl() /// matches the specialization \c A<T,T*,I> but not \c A<int,int,1> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplatePartialSpecializationDecl> classTemplatePartialSpecializationDecl; /// Matches declarator declarations (field, variable, function /// and non-type template parameter declarations). /// /// Given /// \code /// class X { int y; }; /// \endcode /// declaratorDecl() /// matches \c int y. extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl> declaratorDecl; /// Matches parameter variable declarations. /// /// Given /// \code /// void f(int x); /// \endcode /// parmVarDecl() /// matches \c int x. extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl> parmVarDecl; /// Matches C++ access specifier declarations. /// /// Given /// \code /// class C { /// public: /// int a; /// }; /// \endcode /// accessSpecDecl() /// matches 'public:' extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl> accessSpecDecl; /// Matches constructor initializers. /// /// Examples matches \c i(42). /// \code /// class C { /// C() : i(42) {} /// int i; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<CXXCtorInitializer> cxxCtorInitializer; /// Matches template arguments. /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgument() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument; /// Matches template name. /// /// Given /// \code /// template <typename T> class X { }; /// X<int> xi; /// \endcode /// templateName() /// matches 'X' in X<int>. 
extern const internal::VariadicAllOfMatcher<TemplateName> templateName; /// Matches non-type template parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// nonTypeTemplateParmDecl() /// matches 'N', but not 'T'. extern const internal::VariadicDynCastAllOfMatcher<Decl, NonTypeTemplateParmDecl> nonTypeTemplateParmDecl; /// Matches template type parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// templateTypeParmDecl() /// matches 'T', but not 'N'. extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl> templateTypeParmDecl; /// Matches public C++ declarations. /// /// Given /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; /// }; /// \endcode /// fieldDecl(isPublic()) /// matches 'int a;' AST_MATCHER(Decl, isPublic) { return Node.getAccess() == AS_public; } /// Matches protected C++ declarations. /// /// Given /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; /// }; /// \endcode /// fieldDecl(isProtected()) /// matches 'int b;' AST_MATCHER(Decl, isProtected) { return Node.getAccess() == AS_protected; } /// Matches private C++ declarations. /// /// Given /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; /// }; /// \endcode /// fieldDecl(isPrivate()) /// matches 'int c;' AST_MATCHER(Decl, isPrivate) { return Node.getAccess() == AS_private; } /// Matches non-static data members that are bit-fields. /// /// Given /// \code /// class C { /// int a : 2; /// int b; /// }; /// \endcode /// fieldDecl(isBitField()) /// matches 'int a;' but not 'int b;'. AST_MATCHER(FieldDecl, isBitField) { return Node.isBitField(); } /// Matches non-static data members that are bit-fields of the specified /// bit width. 
/// /// Given /// \code /// class C { /// int a : 2; /// int b : 4; /// int c : 2; /// }; /// \endcode /// fieldDecl(hasBitWidth(2)) /// matches 'int a;' and 'int c;' but not 'int b;'. AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) { return Node.isBitField() && Node.getBitWidthValue(Finder->getASTContext()) == Width; } /// Matches non-static data members that have an in-class initializer. /// /// Given /// \code /// class C { /// int a = 2; /// int b = 3; /// int c; /// }; /// \endcode /// fieldDecl(hasInClassInitializer(integerLiteral(equals(2)))) /// matches 'int a;' but not 'int b;'. /// fieldDecl(hasInClassInitializer(anything())) /// matches 'int a;' and 'int b;' but not 'int c;'. AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getInClassInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// Determines whether the function is "main", which is the entry point /// into an executable program. AST_MATCHER(FunctionDecl, isMain) { return Node.isMain(); } /// Matches the specialized template of a specialization declaration. /// /// Given /// \code /// template<typename T> class A {}; #1 /// template<> class A<int> {}; #2 /// \endcode /// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl())) /// matches '#2' with classTemplateDecl() matching the class template /// declaration of 'A' at #1. AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate, internal::Matcher<ClassTemplateDecl>, InnerMatcher) { const ClassTemplateDecl* Decl = Node.getSpecializedTemplate(); return (Decl != nullptr && InnerMatcher.matches(*Decl, Finder, Builder)); } /// Matches a declaration that has been implicitly added /// by the compiler (eg. implicit default/copy constructors). 
AST_MATCHER(Decl, isImplicit) { return Node.isImplicit(); }

/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
///   template<typename T> class A {};
///   template<> class A<double> {};
///   A<int> a;
///
///   template<typename T> void f() {};
///   void func() { f<int>(); };
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
///     refersToType(asString("int"))))
///   matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
///   matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
    hasAnyTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    internal::Matcher<TemplateArgument>, InnerMatcher) {
  ArrayRef<TemplateArgument> List =
      internal::getTemplateSpecializationArgs(Node);
  return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder,
                             Builder);
}

/// Matches expressions that match InnerMatcher after any implicit AST
/// nodes are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
///   class C {};
///   C a = C();
///   C b;
///   C c = b;
/// \endcode
/// The matchers
/// \code
///   varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr())))
/// \endcode
/// would match the declarations for a, b, and c.
/// While
/// \code
///   varDecl(hasInitializer(cxxConstructExpr()))
/// \endcode
/// only match the declarations for b and c.
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>,
              InnerMatcher) {
  return InnerMatcher.matches(*Node.IgnoreImplicit(), Finder, Builder);
}

/// Matches expressions that match InnerMatcher after any implicit casts
/// are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given /// \code /// int arr[5]; /// int a = 0; /// char b = 0; /// const int c = a; /// int *d = arr; /// long e = (long) 0l; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringImpCasts(declRefExpr()))) /// \endcode /// would match the declarations for a, b, c, and d, but not e. /// While /// \code /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// \endcode /// only match the declarations for b, c, and d. AST_MATCHER_P(Expr, ignoringImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after parentheses and /// casts are stripped off. /// /// Implicit and non-C Style casts are also discarded. /// Given /// \code /// int a = 0; /// char b = (0); /// void* c = reinterpret_cast<char*>(0); /// char d = char(0); /// \endcode /// The matcher /// varDecl(hasInitializer(ignoringParenCasts(integerLiteral()))) /// would match the declarations for a, b, c, and d. /// while /// varDecl(hasInitializer(integerLiteral())) /// only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after implicit casts and /// parentheses are stripped off. /// /// Explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = (0); /// const int c = a; /// int *d = (arr); /// long e = ((long) 0l); /// \endcode /// The matchers /// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr()))) /// would match the declarations for a, b, c, and d, but not e. 
/// while /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// would only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder); } /// Matches types that match InnerMatcher after any parens are stripped. /// /// Given /// \code /// void (*fp)(void); /// \endcode /// The matcher /// \code /// varDecl(hasType(pointerType(pointee(ignoringParens(functionType()))))) /// \endcode /// would match the declaration for fp. AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>, InnerMatcher, 0) { return InnerMatcher.matches(Node.IgnoreParens(), Finder, Builder); } /// Overload \c ignoringParens for \c Expr. /// /// Given /// \code /// const char* str = ("my-string"); /// \endcode /// The matcher /// \code /// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral()))) /// \endcode /// would match the implicit cast resulting from the assignment. AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>, InnerMatcher, 1) { const Expr *E = Node.IgnoreParens(); return InnerMatcher.matches(*E, Finder, Builder); } /// Matches expressions that are instantiation-dependent even if it is /// neither type- nor value-dependent. /// /// In the following example, the expression sizeof(sizeof(T() + T())) /// is instantiation-dependent (since it involves a template parameter T), /// but is neither type- nor value-dependent, since the type of the inner /// sizeof is known (std::size_t) and therefore the size of the outer /// sizeof is known. 
/// \code /// template<typename T> /// void f(T x, T y) { sizeof(sizeof(T() + T()); } /// \endcode /// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()) AST_MATCHER(Expr, isInstantiationDependent) { return Node.isInstantiationDependent(); } /// Matches expressions that are type-dependent because the template type /// is not yet instantiated. /// /// For example, the expressions "x" and "x + y" are type-dependent in /// the following code, but "y" is not type-dependent: /// \code /// template<typename T> /// void add(T x, int y) { /// x + y; /// } /// \endcode /// expr(isTypeDependent()) matches x + y AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); } /// Matches expression that are value-dependent because they contain a /// non-type template parameter. /// /// For example, the array bound of "Chars" in the following example is /// value-dependent. /// \code /// template<int Size> int f() { return Size; } /// \endcode /// expr(isValueDependent()) matches return Size AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl where the n'th TemplateArgument matches the given InnerMatcher. 
/// /// Given /// \code /// template<typename T, typename U> class A {}; /// A<bool, int> b; /// A<int, bool> c; /// /// template<typename T> void f() {} /// void func() { f<int>(); }; /// \endcode /// classTemplateSpecializationDecl(hasTemplateArgument( /// 1, refersToType(asString("int")))) /// matches the specialization \c A<bool, int> /// /// functionDecl(hasTemplateArgument(0, refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P2( hasTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); if (List.size() <= N) return false; return InnerMatcher.matches(List[N], Finder, Builder); } /// Matches if the number of template arguments equals \p N. /// /// Given /// \code /// template<typename T> struct C {}; /// C<int> c; /// \endcode /// classTemplateSpecializationDecl(templateArgumentCountIs(1)) /// matches C<int>. AST_POLYMORPHIC_MATCHER_P( templateArgumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType), unsigned, N) { return internal::getTemplateSpecializationArgs(Node).size() == N; } /// Matches a TemplateArgument that refers to a certain type. /// /// Given /// \code /// struct X {}; /// template<typename T> struct A {}; /// A<X> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(class(hasName("X"))))) /// matches the specialization \c A<X> AST_MATCHER_P(TemplateArgument, refersToType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Type) return false; return InnerMatcher.matches(Node.getAsType(), Finder, Builder); } /// Matches a TemplateArgument that refers to a certain template. 
/// /// Given /// \code /// template<template <typename> class S> class X {}; /// template<typename T> class Y {}; /// X<Y> xi; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToTemplate(templateName()))) /// matches the specialization \c X<Y> AST_MATCHER_P(TemplateArgument, refersToTemplate, internal::Matcher<TemplateName>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Template) return false; return InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder); } /// Matches a canonical TemplateArgument that refers to a certain /// declaration. /// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToDeclaration(fieldDecl(hasName("next"))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, refersToDeclaration, internal::Matcher<Decl>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Declaration) return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder); return false; } /// Matches a sugar TemplateArgument that refers to a certain expression. /// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// templateSpecializationType(hasAnyTemplateArgument( /// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next")))))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Expression) return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder); return false; } /// Matches a TemplateArgument that is an integral value. 
/// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(isIntegral())) /// matches the implicit instantiation of C in C<42> /// with isIntegral() matching 42. AST_MATCHER(TemplateArgument, isIntegral) { return Node.getKind() == TemplateArgument::Integral; } /// Matches a TemplateArgument that referes to an integral type. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(refersToIntegralType(asString("int")))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, refersToIntegralType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Integral) return false; return InnerMatcher.matches(Node.getIntegralType(), Finder, Builder); } /// Matches a TemplateArgument of integral type with a given value. /// /// Note that 'Value' is a string as the template argument's value is /// an arbitrary precision integer. 'Value' must be euqal to the canonical /// representation of that integral value in base 10. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(equalsIntegralValue("42"))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, equalsIntegralValue, std::string, Value) { if (Node.getKind() != TemplateArgument::Integral) return false; return Node.getAsIntegral().toString(10) == Value; } /// Matches an Objective-C autorelease pool statement. /// /// Given /// \code /// @autoreleasepool { /// int x = 0; /// } /// \endcode /// autoreleasePoolStmt(stmt()) matches the declaration of "x" /// inside the autorelease pool. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAutoreleasePoolStmt> autoreleasePoolStmt; /// Matches any value declaration. 
/// /// Example matches A, B, C and F /// \code /// enum X { A, B, C }; /// void F(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl; /// Matches C++ constructor declarations. /// /// Example matches Foo::Foo() and Foo::Foo(int) /// \code /// class Foo { /// public: /// Foo(); /// Foo(int); /// int DoSomething(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl> cxxConstructorDecl; /// Matches explicit C++ destructor declarations. /// /// Example matches Foo::~Foo() /// \code /// class Foo { /// public: /// virtual ~Foo(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl> cxxDestructorDecl; /// Matches enum declarations. /// /// Example matches X /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl; /// Matches enum constants. /// /// Example matches A, B, C /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl> enumConstantDecl; /// Matches method declarations. /// /// Example matches y /// \code /// class X { void y(); }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl> cxxMethodDecl; /// Matches conversion operator declarations. /// /// Example matches the operator. /// \code /// class X { operator int() const; }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl> cxxConversionDecl; /// Matches user-defined and implicitly generated deduction guide. /// /// Example matches the deduction guide. /// \code /// template<typename T> /// class X { X(int) }; /// X(int) -> X<int>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl> cxxDeductionGuideDecl; /// Matches variable declarations. 
/// /// Note: this does not match declarations of member variables, which are /// "field" declarations in Clang parlance. /// /// Example matches a /// \code /// int a; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl; /// Matches field declarations. /// /// Given /// \code /// class X { int m; }; /// \endcode /// fieldDecl() /// matches 'm'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl; /// Matches indirect field declarations. /// /// Given /// \code /// struct X { struct { int a; }; }; /// \endcode /// indirectFieldDecl() /// matches 'a'. extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl> indirectFieldDecl; /// Matches function declarations. /// /// Example matches f /// \code /// void f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl> functionDecl; /// Matches C++ function template declarations. /// /// Example matches f /// \code /// template<class T> void f(T t) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl> functionTemplateDecl; /// Matches friend declarations. /// /// Given /// \code /// class X { friend void foo(); }; /// \endcode /// friendDecl() /// matches 'friend void foo()'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl; /// Matches statements. /// /// Given /// \code /// { ++a; } /// \endcode /// stmt() /// matches both the compound statement '{ ++a; }' and '++a'. extern const internal::VariadicAllOfMatcher<Stmt> stmt; /// Matches declaration statements. /// /// Given /// \code /// int a; /// \endcode /// declStmt() /// matches 'int a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt; /// Matches member expressions. 
/// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// int a; static int b; /// }; /// \endcode /// memberExpr() /// matches this->x, x, y.x, a, this->b extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr; /// Matches unresolved member expressions. /// /// Given /// \code /// struct X { /// template <class T> void f(); /// void g(); /// }; /// template <class T> void h() { X x; x.f<T>(); x.g(); } /// \endcode /// unresolvedMemberExpr() /// matches x.f<T> extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr> unresolvedMemberExpr; /// Matches member expressions where the actual member referenced could not be /// resolved because the base expression or the member name was dependent. /// /// Given /// \code /// template <class T> void f() { T t; t.g(); } /// \endcode /// cxxDependentScopeMemberExpr() /// matches t.g extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDependentScopeMemberExpr> cxxDependentScopeMemberExpr; /// Matches call expressions. /// /// Example matches x.y() and y() /// \code /// X x; /// x.y(); /// y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr; /// Matches call expressions which were resolved using ADL. /// /// Example matches y(x) but not y(42) or NS::y(x). /// \code /// namespace NS { /// struct X {}; /// void y(X); /// } /// /// void y(...); /// /// void test() { /// NS::X x; /// y(x); // Matches /// NS::y(x); // Doesn't match /// y(42); // Doesn't match /// using NS::y; /// y(x); // Found by both unqualified lookup and ADL, doesn't match // } /// \endcode AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); } /// Matches lambda expressions. /// /// Example matches [&](){return 5;} /// \code /// [&](){return 5;} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr; /// Matches member call expressions. 
/// /// Example matches x.y() /// \code /// X x; /// x.y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr> cxxMemberCallExpr; /// Matches ObjectiveC Message invocation expressions. /// /// The innermost message send invokes the "alloc" class method on the /// NSString class, while the outermost message send invokes the /// "initWithString" instance method on the object returned from /// NSString's "alloc". This matcher should match both message sends. /// \code /// [[NSString alloc] initWithString:@"Hello"] /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr> objcMessageExpr; /// Matches Objective-C interface declarations. /// /// Example matches Foo /// \code /// @interface Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl> objcInterfaceDecl; /// Matches Objective-C implementation declarations. /// /// Example matches Foo /// \code /// @implementation Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl> objcImplementationDecl; /// Matches Objective-C protocol declarations. /// /// Example matches FooDelegate /// \code /// @protocol FooDelegate /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl> objcProtocolDecl; /// Matches Objective-C category declarations. /// /// Example matches Foo (Additions) /// \code /// @interface Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl> objcCategoryDecl; /// Matches Objective-C category definitions. /// /// Example matches Foo (Additions) /// \code /// @implementation Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl> objcCategoryImplDecl; /// Matches Objective-C method declarations. 
/// /// Example matches both declaration and definition of -[Foo method] /// \code /// @interface Foo /// - (void)method; /// @end /// /// @implementation Foo /// - (void)method {} /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl> objcMethodDecl; /// Matches block declarations. /// /// Example matches the declaration of the nameless block printing an input /// integer. /// /// \code /// myFunc(^(int p) { /// printf("%d", p); /// }) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl> blockDecl; /// Matches Objective-C instance variable declarations. /// /// Example matches _enabled /// \code /// @implementation Foo { /// BOOL _enabled; /// } /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl> objcIvarDecl; /// Matches Objective-C property declarations. /// /// Example matches enabled /// \code /// @interface Foo /// @property BOOL enabled; /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl> objcPropertyDecl; /// Matches Objective-C \@throw statements. /// /// Example matches \@throw /// \code /// @throw obj; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt> objcThrowStmt; /// Matches Objective-C @try statements. /// /// Example matches @try /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt> objcTryStmt; /// Matches Objective-C @catch statements. /// /// Example matches @catch /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt> objcCatchStmt; /// Matches Objective-C @finally statements. 
/// /// Example matches @finally /// \code /// @try {} /// @finally {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt> objcFinallyStmt; /// Matches expressions that introduce cleanups to be run at the end /// of the sub-expression's evaluation. /// /// Example matches std::string() /// \code /// const std::string str = std::string(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups> exprWithCleanups; /// Matches init list expressions. /// /// Given /// \code /// int a[] = { 1, 2 }; /// struct B { int x, y; }; /// B b = { 5, 6 }; /// \endcode /// initListExpr() /// matches "{ 1, 2 }" and "{ 5, 6 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr> initListExpr; /// Matches the syntactic form of init list expressions /// (if expression have it). AST_MATCHER_P(InitListExpr, hasSyntacticForm, internal::Matcher<Expr>, InnerMatcher) { const Expr *SyntForm = Node.getSyntacticForm(); return (SyntForm != nullptr && InnerMatcher.matches(*SyntForm, Finder, Builder)); } /// Matches C++ initializer list expressions. /// /// Given /// \code /// std::vector<int> a({ 1, 2, 3 }); /// std::vector<int> b = { 4, 5 }; /// int c[] = { 6, 7 }; /// std::pair<int, int> d = { 8, 9 }; /// \endcode /// cxxStdInitializerListExpr() /// matches "{ 1, 2, 3 }" and "{ 4, 5 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStdInitializerListExpr> cxxStdInitializerListExpr; /// Matches implicit initializers of init list expressions. /// /// Given /// \code /// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 }; /// \endcode /// implicitValueInitExpr() /// matches "[0].y" (implicitly) extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr> implicitValueInitExpr; /// Matches paren list expressions. /// ParenListExprs don't have a predefined type and are used for late parsing. /// In the final AST, they can be met in template declarations. 
/// /// Given /// \code /// template<typename T> class X { /// void f() { /// X x(*this); /// int a = 0, b = 1; int i = (a, b); /// } /// }; /// \endcode /// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b) /// has a predefined type and is a ParenExpr, not a ParenListExpr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr> parenListExpr; /// Matches substitutions of non-type template parameters. /// /// Given /// \code /// template <int N> /// struct A { static const int n = N; }; /// struct B : public A<42> {}; /// \endcode /// substNonTypeTemplateParmExpr() /// matches "N" in the right-hand side of "static const int n = N;" extern const internal::VariadicDynCastAllOfMatcher<Stmt, SubstNonTypeTemplateParmExpr> substNonTypeTemplateParmExpr; /// Matches using declarations. /// /// Given /// \code /// namespace X { int x; } /// using X::x; /// \endcode /// usingDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl; /// Matches using namespace declarations. /// /// Given /// \code /// namespace X { int x; } /// using namespace X; /// \endcode /// usingDirectiveDecl() /// matches \code using namespace X \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl> usingDirectiveDecl; /// Matches reference to a name that can be looked up during parsing /// but could not be resolved to a specific declaration. /// /// Given /// \code /// template<typename T> /// T foo() { T a; return a; } /// template<typename T> /// void bar() { /// foo<T>(); /// } /// \endcode /// unresolvedLookupExpr() /// matches \code foo<T>() \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr> unresolvedLookupExpr; /// Matches unresolved using value declarations. 
/// /// Given /// \code /// template<typename X> /// class C : private X { /// using X::x; /// }; /// \endcode /// unresolvedUsingValueDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingValueDecl> unresolvedUsingValueDecl; /// Matches unresolved using value declarations that involve the /// typename. /// /// Given /// \code /// template <typename T> /// struct Base { typedef T Foo; }; /// /// template<typename T> /// struct S : private Base<T> { /// using typename Base<T>::Foo; /// }; /// \endcode /// unresolvedUsingTypenameDecl() /// matches \code using Base<T>::Foo \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingTypenameDecl> unresolvedUsingTypenameDecl; /// Matches a constant expression wrapper. /// /// Example matches the constant in the case statement: /// (matcher = constantExpr()) /// \code /// switch (a) { /// case 37: break; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr> constantExpr; /// Matches parentheses used in expressions. /// /// Example matches (foo() + 1) /// \code /// int foo() { return 1; } /// int a = (foo() + 1); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr; /// Matches constructor call expressions (including implicit ones). /// /// Example matches string(ptr, n) and ptr within arguments of f /// (matcher = cxxConstructExpr()) /// \code /// void f(const string &a, const string &b); /// char *ptr; /// int n; /// f(string(ptr, n), ptr); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr> cxxConstructExpr; /// Matches unresolved constructor call expressions. 
/// /// Example matches T(t) in return statement of f /// (matcher = cxxUnresolvedConstructExpr()) /// \code /// template <typename T> /// void f(const T& t) { return T(t); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXUnresolvedConstructExpr> cxxUnresolvedConstructExpr; /// Matches implicit and explicit this expressions. /// /// Example matches the implicit this expression in "return i". /// (matcher = cxxThisExpr()) /// \code /// struct foo { /// int i; /// int f() { return i; } /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr> cxxThisExpr; /// Matches nodes where temporaries are created. /// /// Example matches FunctionTakesString(GetStringByValue()) /// (matcher = cxxBindTemporaryExpr()) /// \code /// FunctionTakesString(GetStringByValue()); /// FunctionTakesStringByPointer(GetStringPointer()); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr> cxxBindTemporaryExpr; /// Matches nodes where temporaries are materialized. /// /// Example: Given /// \code /// struct T {void func();}; /// T f(); /// void g(T); /// \endcode /// materializeTemporaryExpr() matches 'f()' in these statements /// \code /// T u(f()); /// g(f()); /// f().func(); /// \endcode /// but does not match /// \code /// f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, MaterializeTemporaryExpr> materializeTemporaryExpr; /// Matches new expressions. /// /// Given /// \code /// new X; /// \endcode /// cxxNewExpr() /// matches 'new X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr; /// Matches delete expressions. /// /// Given /// \code /// delete X; /// \endcode /// cxxDeleteExpr() /// matches 'delete X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr> cxxDeleteExpr; /// Matches array subscript expressions. 
/// /// Given /// \code /// int i = a[1]; /// \endcode /// arraySubscriptExpr() /// matches "a[1]" extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr> arraySubscriptExpr; /// Matches the value of a default argument at the call site. /// /// Example matches the CXXDefaultArgExpr placeholder inserted for the /// default value of the second parameter in the call expression f(42) /// (matcher = cxxDefaultArgExpr()) /// \code /// void f(int x, int y = 0); /// f(42); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr> cxxDefaultArgExpr; /// Matches overloaded operator calls. /// /// Note that if an operator isn't overloaded, it won't match. Instead, use /// binaryOperator matcher. /// Currently it does not match operators such as new delete. /// FIXME: figure out why these do not match? /// /// Example matches both operator<<((o << b), c) and operator<<(o, b) /// (matcher = cxxOperatorCallExpr()) /// \code /// ostream &operator<< (ostream &out, int i) { }; /// ostream &o; int b = 1, c = 1; /// o << b << c; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr> cxxOperatorCallExpr; /// Matches expressions. /// /// Example matches x() /// \code /// void f() { x(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr; /// Matches expressions that refer to declarations. /// /// Example matches x in if (x) /// \code /// bool x; /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr> declRefExpr; /// Matches a reference to an ObjCIvar. /// /// Example: matches "a" in "init" method: /// \code /// @implementation A { /// NSString *a; /// } /// - (void) init { /// a = @"hello"; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr> objcIvarRefExpr; /// Matches a reference to a block. 
/// /// Example: matches "^{}": /// \code /// void f() { ^{}(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr; /// Matches if statements. /// /// Example matches 'if (x) {}' /// \code /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt; /// Matches for statements. /// /// Example matches 'for (;;) {}' /// \code /// for (;;) {} /// int i[] = {1, 2, 3}; for (auto a : i); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt; /// Matches the increment statement of a for loop. /// /// Example: /// forStmt(hasIncrement(unaryOperator(hasOperatorName("++")))) /// matches '++x' in /// \code /// for (x; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Increment = Node.getInc(); return (Increment != nullptr && InnerMatcher.matches(*Increment, Finder, Builder)); } /// Matches the initialization statement of a for loop. /// /// Example: /// forStmt(hasLoopInit(declStmt())) /// matches 'int x = 0' in /// \code /// for (int x = 0; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Init = Node.getInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches range-based for statements. /// /// cxxForRangeStmt() matches 'for (auto a : i)' /// \code /// int i[] = {1, 2, 3}; for (auto a : i); /// for(int j = 0; j < 5; ++j); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt> cxxForRangeStmt; /// Matches the initialization statement of a for loop. 
/// /// Example: /// forStmt(hasLoopVariable(anything())) /// matches 'int x' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>, InnerMatcher) { const VarDecl *const Var = Node.getLoopVariable(); return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder)); } /// Matches the range initialization statement of a for loop. /// /// Example: /// forStmt(hasRangeInit(anything())) /// matches 'a' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>, InnerMatcher) { const Expr *const Init = Node.getRangeInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches while statements. /// /// Given /// \code /// while (true) {} /// \endcode /// whileStmt() /// matches 'while (true) {}'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt; /// Matches do statements. /// /// Given /// \code /// do {} while (true); /// \endcode /// doStmt() /// matches 'do {} while(true)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt; /// Matches break statements. /// /// Given /// \code /// while (true) { break; } /// \endcode /// breakStmt() /// matches 'break' extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt; /// Matches continue statements. /// /// Given /// \code /// while (true) { continue; } /// \endcode /// continueStmt() /// matches 'continue' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt> continueStmt; /// Matches return statements. /// /// Given /// \code /// return 1; /// \endcode /// returnStmt() /// matches 'return 1' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt; /// Matches goto statements. 
/// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// gotoStmt() /// matches 'goto FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt; /// Matches label statements. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelStmt() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt; /// Matches address of label statements (GNU extension). /// /// Given /// \code /// FOO: bar(); /// void *ptr = &&FOO; /// goto *bar; /// \endcode /// addrLabelExpr() /// matches '&&FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr> addrLabelExpr; /// Matches switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchStmt() /// matches 'switch(a)'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt; /// Matches case and default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchCase() /// matches 'case 42:' and 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase; /// Matches case statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// caseStmt() /// matches 'case 42:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt; /// Matches default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// defaultStmt() /// matches 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt> defaultStmt; /// Matches compound statements. /// /// Example matches '{}' and '{{}}' in 'for (;;) {{}}' /// \code /// for (;;) {{}} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt> compoundStmt; /// Matches catch statements. 
/// /// \code /// try {} catch(int i) {} /// \endcode /// cxxCatchStmt() /// matches 'catch(int i)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt> cxxCatchStmt; /// Matches try statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxTryStmt() /// matches 'try {}' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt; /// Matches throw expressions. /// /// \code /// try { throw 5; } catch(int i) {} /// \endcode /// cxxThrowExpr() /// matches 'throw 5' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr> cxxThrowExpr; /// Matches null statements. /// /// \code /// foo();; /// \endcode /// nullStmt() /// matches the second ';' extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt; /// Matches asm statements. /// /// \code /// int i = 100; /// __asm("mov al, 2"); /// \endcode /// asmStmt() /// matches '__asm("mov al, 2")' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt; /// Matches bool literals. /// /// Example matches true /// \code /// true /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr> cxxBoolLiteral; /// Matches string literals (also matches wide string literals). /// /// Example matches "abcd", L"abcd" /// \code /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral> stringLiteral; /// Matches character literals (also matches wchar_t). /// /// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral), /// though. /// /// Example matches 'a', L'a' /// \code /// char ch = 'a'; /// wchar_t chw = L'a'; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral> characterLiteral; /// Matches integer literals of all sizes / encodings, e.g. /// 1, 1L, 0x1 and 1U. /// /// Does not match character-encoded integers such as L'a'. 
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral> integerLiteral; /// Matches float literals of all sizes / encodings, e.g. /// 1.0, 1.0f, 1.0L and 1e10. /// /// Does not match implicit conversions such as /// \code /// float a = 10; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral> floatLiteral; /// Matches imaginary literals, which are based on integer and floating /// point literals e.g.: 1i, 1.0i extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral> imaginaryLiteral; /// Matches user defined literal operator call. /// /// Example match: "foo"_suffix extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral> userDefinedLiteral; /// Matches compound (i.e. non-scalar) literals /// /// Example match: {1}, (1, 2) /// \code /// int array[4] = {1}; /// vector int myvec = (vector int)(1, 2); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr> compoundLiteralExpr; /// Matches nullptr literal. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr> cxxNullPtrLiteralExpr; /// Matches GNU __builtin_choose_expr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr> chooseExpr; /// Matches GNU __null expression. extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr> gnuNullExpr; /// Matches atomic builtins. /// Example matches __atomic_load_n(ptr, 1) /// \code /// void foo() { int *ptr; __atomic_load_n(ptr, 1); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr; /// Matches statement expression (GNU extension). /// /// Example match: ({ int X = 4; X; }) /// \code /// int C = ({ int X = 4; X; }); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr; /// Matches binary operator expressions. 
/// /// Example matches a || b /// \code /// !(a || b) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator> binaryOperator; /// Matches unary operator expressions. /// /// Example matches !a /// \code /// !a || b /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator> unaryOperator; /// Matches conditional operator expressions. /// /// Example matches a ? b : c /// \code /// (a ? b : c) + 42 /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator> conditionalOperator; /// Matches binary conditional operator expressions (GNU extension). /// /// Example matches a ?: b /// \code /// (a ?: b) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryConditionalOperator> binaryConditionalOperator; /// Matches opaque value expressions. They are used as helpers /// to reference another expressions and can be met /// in BinaryConditionalOperators, for example. /// /// Example matches 'a' /// \code /// (a ?: c) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr> opaqueValueExpr; /// Matches a C++ static_assert declaration. /// /// Example: /// staticAssertExpr() /// matches /// static_assert(sizeof(S) == sizeof(int)) /// in /// \code /// struct S { /// int x; /// }; /// static_assert(sizeof(S) == sizeof(int)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl> staticAssertDecl; /// Matches a reinterpret_cast expression. /// /// Either the source expression or the destination type can be matched /// using has(), but hasDestinationType() is more specific and can be /// more readable. /// /// Example matches reinterpret_cast<char*>(&p) in /// \code /// void* p = reinterpret_cast<char*>(&p); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr> cxxReinterpretCastExpr; /// Matches a C++ static_cast expression. 
/// /// \see hasDestinationType /// \see reinterpretCast /// /// Example: /// cxxStaticCastExpr() /// matches /// static_cast<long>(8) /// in /// \code /// long eight(static_cast<long>(8)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr> cxxStaticCastExpr; /// Matches a dynamic_cast expression. /// /// Example: /// cxxDynamicCastExpr() /// matches /// dynamic_cast<D*>(&b); /// in /// \code /// struct B { virtual ~B() {} }; struct D : B {}; /// B b; /// D* p = dynamic_cast<D*>(&b); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr> cxxDynamicCastExpr; /// Matches a const_cast expression. /// /// Example: Matches const_cast<int*>(&r) in /// \code /// int n = 42; /// const int &r(n); /// int* p = const_cast<int*>(&r); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr> cxxConstCastExpr; /// Matches a C-style cast expression. /// /// Example: Matches (int) 2.2f in /// \code /// int i = (int) 2.2f; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr> cStyleCastExpr; /// Matches explicit cast expressions. /// /// Matches any cast expression written in user code, whether it be a /// C-style cast, a functional-style cast, or a keyword cast. /// /// Does not match implicit conversions. /// /// Note: the name "explicitCast" is chosen to match Clang's terminology, as /// Clang uses the term "cast" to apply to implicit conversions as well as to /// actual cast expressions. /// /// \see hasDestinationType. /// /// Example: matches all five of the casts in /// \code /// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42))))) /// \endcode /// but does not match the implicit conversion in /// \code /// long ell = 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr> explicitCastExpr; /// Matches the implicit cast nodes of Clang's AST. 
/// /// This matches many different places, including function call return value /// eliding, as well as any type conversions. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr> implicitCastExpr; /// Matches any cast nodes of Clang's AST. /// /// Example: castExpr() matches each of the following: /// \code /// (int) 3; /// const_cast<Expr *>(SubExpr); /// char c = 0; /// \endcode /// but does not match /// \code /// int i = (0); /// int k = 0; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr; /// Matches functional cast expressions /// /// Example: Matches Foo(bar); /// \code /// Foo f = bar; /// Foo g = (Foo) bar; /// Foo h = Foo(bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr> cxxFunctionalCastExpr; /// Matches functional cast expressions having N != 1 arguments /// /// Example: Matches Foo(bar, bar) /// \code /// Foo h = Foo(bar, bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr> cxxTemporaryObjectExpr; /// Matches predefined identifier expressions [C99 6.4.2.2]. /// /// Example: Matches __func__ /// \code /// printf("%s", __func__); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr> predefinedExpr; /// Matches C99 designated initializer expressions [C99 6.7.8]. /// /// Example: Matches { [2].y = 1.0, [0].x = 1.0 } /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr> designatedInitExpr; /// Matches designated initializer expressions that contain /// a specific number of designators. 
/// /// Example: Given /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }; /// \endcode /// designatorCountIs(2) /// matches '{ [2].y = 1.0, [0].x = 1.0 }', /// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'. AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) { return Node.size() == N; } /// Matches \c QualTypes in the clang AST. extern const internal::VariadicAllOfMatcher<QualType> qualType; /// Matches \c Types in the clang AST. extern const internal::VariadicAllOfMatcher<Type> type; /// Matches \c TypeLocs in the clang AST. extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc; /// Matches if any of the given matchers matches. /// /// Unlike \c anyOf, \c eachOf will generate a match result for each /// matching submatcher. /// /// For example, in: /// \code /// class A { int a; int b; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")), /// has(fieldDecl(hasName("b")).bind("v")))) /// \endcode /// will generate two results binding "v", the first of which binds /// the field declaration of \c a, the second the field declaration of /// \c b. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> eachOf; /// Matches if any of the given matchers matches. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> anyOf; /// Matches if all given matchers match. 
/// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> allOf; /// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL) /// /// Given /// \code /// Foo x = bar; /// int y = sizeof(x) + alignof(x); /// \endcode /// unaryExprOrTypeTraitExpr() /// matches \c sizeof(x) and \c alignof(x) extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryExprOrTypeTraitExpr> unaryExprOrTypeTraitExpr; /// Matches unary expressions that have a specific type of argument. /// /// Given /// \code /// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c); /// \endcode /// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")) /// matches \c sizeof(a) and \c alignof(c) AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType, internal::Matcher<QualType>, InnerMatcher) { const QualType ArgumentType = Node.getTypeOfArgument(); return InnerMatcher.matches(ArgumentType, Finder, Builder); } /// Matches unary expressions of a certain kind. /// /// Given /// \code /// int x; /// int s = sizeof(x) + alignof(x) /// \endcode /// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf)) /// matches \c sizeof(x) /// /// If the matcher is use from clang-query, UnaryExprOrTypeTrait parameter /// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf"). AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) { return Node.getKind() == Kind; } /// Same as unaryExprOrTypeTraitExpr, but only matching /// alignof. inline internal::Matcher<Stmt> alignOfExpr( const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) { return stmt(unaryExprOrTypeTraitExpr( allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)), InnerMatcher))); } /// Same as unaryExprOrTypeTraitExpr, but only matching /// sizeof. 
inline internal::Matcher<Stmt> sizeOfExpr( const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) { return stmt(unaryExprOrTypeTraitExpr( allOf(ofKind(UETT_SizeOf), InnerMatcher))); } /// Matches NamedDecl nodes that have the specified name. /// /// Supports specifying enclosing namespaces or classes by prefixing the name /// with '<enclosing>::'. /// Does not match typedefs of an underlying type with the given name. /// /// Example matches X (Name == "X") /// \code /// class X; /// \endcode /// /// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X") /// \code /// namespace a { namespace b { class X; } } /// \endcode inline internal::Matcher<NamedDecl> hasName(const std::string &Name) { return internal::Matcher<NamedDecl>(new internal::HasNameMatcher({Name})); } /// Matches NamedDecl nodes that have any of the specified names. /// /// This matcher is only provided as a performance optimization of hasName. /// \code /// hasAnyName(a, b, c) /// \endcode /// is equivalent to, but faster than /// \code /// anyOf(hasName(a), hasName(b), hasName(c)) /// \endcode extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef, internal::hasAnyNameFunc> hasAnyName; /// Matches NamedDecl nodes whose fully qualified names contain /// a substring matched by the given RegExp. /// /// Supports specifying enclosing namespaces or classes by /// prefixing the name with '<enclosing>::'. Does not match typedefs /// of an underlying type with the given name. /// /// Example matches X (regexp == "::X") /// \code /// class X; /// \endcode /// /// Example matches X (regexp is one of "::X", "^foo::.*X", among others) /// \code /// namespace foo { namespace bar { class X; } } /// \endcode AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) { assert(!RegExp.empty()); std::string FullNameString = "::" + Node.getQualifiedNameAsString(); llvm::Regex RE(RegExp); return RE.match(FullNameString); } /// Matches overloaded operator names. 
/// /// Matches overloaded operator names specified in strings without the /// "operator" prefix: e.g. "<<". /// /// Given: /// \code /// class A { int operator*(); }; /// const A &operator<<(const A &a, const A &b); /// A a; /// a << a; // <-- This matches /// \endcode /// /// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the /// specified line and /// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*"))) /// matches the declaration of \c A. /// /// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl> inline internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, StringRef, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)> hasOverloadedOperatorName(StringRef Name) { return internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, StringRef, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>(Name); } /// Matches C++ classes that are directly or indirectly derived from a class /// matching \c Base, or Objective-C classes that directly or indirectly /// subclass a class matching \c Base. /// /// Note that a class is not considered to be derived from itself. 
/// /// Example matches Y, Z, C (Base == hasName("X")) /// \code /// class X; /// class Y : public X {}; // directly derived /// class Z : public Y {}; // indirectly derived /// typedef X A; /// typedef A B; /// class C : public B {}; // derived from a typedef of X /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("X")): /// \code /// class Foo; /// typedef Foo X; /// class Bar : public Foo {}; // derived from a type that X is a typedef of /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("NSObject")) /// \code /// @interface NSObject @end /// @interface Bar : NSObject @end /// \endcode /// /// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl> AST_POLYMORPHIC_MATCHER_P( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base) { // Check if the node is a C++ struct/union/class. if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/false); // The node must be an Objective-C class. const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder, /*Directly=*/false); } /// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)). AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Similar to \c isDerivedFrom(), but also matches classes that directly /// match \c Base. 
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // "Same or derived": the class either matches Base itself, or derives
  // from something matching Base.
  const auto M = anyOf(Base, isDerivedFrom(Base));

  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);

  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}

/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match; bail out early.
  if (BaseName.empty())
    return false;

  const auto M = isSameOrDerivedFrom(hasName(BaseName));

  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);

  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}

/// Matches C++ or Objective-C classes that are directly derived from a class
/// matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
///   class X;
///   class Y : public X {};  // directly derived
///   class Z : public Y {};  // indirectly derived
///   typedef X A;
///   typedef A B;
///   class C : public B {};  // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
///   class Foo;
///   typedef Foo X;
///   class Bar : public Foo {};  // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // Check if the node is a C++ struct/union/class.
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/true);

  // The node must be an Objective-C class (only two types are admitted by
  // the polymorphic type list, so cast<> is safe).
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
                                        /*Directly=*/true);
}

/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match; bail out early.
  if (BaseName.empty())
    return false;
  const auto M = isDirectlyDerivedFrom(hasName(BaseName));

  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);

  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}

/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
///   class A { void func(); };
///   class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  // Stops at the first method satisfying InnerMatcher.
  return matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                    Node.method_end(), Finder, Builder);
}

/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
///   auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  return Node.isLambda();
}

/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
///   (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))
/// \code
///   class X {};  // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. /// /// Usable as: Any Matcher /// Note that has is direct matcher, so it also matches things like implicit /// casts and paren casts. If you are matching with expr then you should /// probably consider using ignoringParenImpCasts like: /// has(ignoringParenImpCasts(expr())). extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Z /// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; // Matches X, because X::X is a class of name X inside X. /// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasDescendantMatcher> hasDescendant; /// Matches AST nodes that have child AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Y::X, Z::Y, Z::Y::X /// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X"))) /// \code /// class X {}; /// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X /// // inside Y. /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. /// /// As opposed to 'has', 'forEach' will cause a match for each result that /// matches instead of only on the first one. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher> forEach; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. 
/// /// Example matches X, A, A::X, B, B::C, B::C::X /// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; /// class A { class X {}; }; // Matches A, because A::X is a class of name /// // X inside A. /// class B { class C { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for /// each result that matches instead of only on the first one. /// /// Note: Recursively combined ForEachDescendant can cause many matches: /// cxxRecordDecl(forEachDescendant(cxxRecordDecl( /// forEachDescendant(cxxRecordDecl()) /// ))) /// will match 10 times (plus injected class name matches) on: /// \code /// class A { class B { class C { class D { class E {}; }; }; }; }; /// \endcode /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::ForEachDescendantMatcher> forEachDescendant; /// Matches if the node or any descendant matches. /// /// Generates results for each match. /// /// For example, in: /// \code /// class A { class B {}; class C {}; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(hasName("::A"), /// findAll(cxxRecordDecl(isDefinition()).bind("m"))) /// \endcode /// will generate results for \c A, \c B and \c C. /// /// Usable as: Any Matcher template <typename T> internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) { return eachOf(Matcher, forEachDescendant(Matcher)); } /// Matches AST nodes that have a parent that matches the provided /// matcher. /// /// Given /// \code /// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } } /// \endcode /// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }". 
/// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasParentMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasParent; /// Matches AST nodes that have an ancestor that matches the provided /// matcher. /// /// Given /// \code /// void f() { if (true) { int x = 42; } } /// void g() { for (;;) { int x = 43; } } /// \endcode /// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasAncestorMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasAncestor; /// Matches if the provided matcher does not match. /// /// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X")))) /// \code /// class X {}; /// class Y {}; /// \endcode /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> unless; /// Matches a node if the declaration associated with that node /// matches the given matcher. /// /// The associated declaration is: /// - for type nodes, the declaration of the underlying type /// - for CallExpr, the declaration of the callee /// - for MemberExpr, the declaration of the referenced member /// - for CXXConstructExpr, the declaration of the constructor /// - for CXXNewExpr, the declaration of the operator new /// - for ObjCIvarExpr, the declaration of the ivar /// /// For type nodes, hasDeclaration will generally match the declaration of the /// sugared type. Given /// \code /// class X {}; /// typedef X Y; /// Y y; /// \endcode /// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the /// typedefDecl. A common use case is to match the underlying, desugared type. 
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher: /// \code /// varDecl(hasType(hasUnqualifiedDesugaredType( /// recordType(hasDeclaration(decl()))))) /// \endcode /// In this matcher, the decl will match the CXXRecordDecl of class X. /// /// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>, /// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>, /// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>, /// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>, /// Matcher<TagType>, Matcher<TemplateSpecializationType>, /// Matcher<TemplateTypeParmType>, Matcher<TypedefType>, /// Matcher<UnresolvedUsingType> inline internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)> hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) { return internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)>(InnerMatcher); } /// Matches a \c NamedDecl whose underlying declaration matches the given /// matcher. /// /// Given /// \code /// namespace N { template<class T> void f(T t); } /// template <class T> void g() { using N::f; f(T()); } /// \endcode /// \c unresolvedLookupExpr(hasAnyDeclaration( /// namedDecl(hasUnderlyingDecl(hasName("::N::f"))))) /// matches the use of \c f in \c g() . AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>, InnerMatcher) { const NamedDecl *UnderlyingDecl = Node.getUnderlyingDecl(); return UnderlyingDecl != nullptr && InnerMatcher.matches(*UnderlyingDecl, Finder, Builder); } /// Matches on the implicit object argument of a member call expression, after /// stripping off any parentheses or implicit casts. 
/// /// Given /// \code /// class Y { public: void m(); }; /// Y g(); /// class X : public Y {}; /// void z(Y y, X x) { y.m(); (g()).m(); x.m(); } /// \endcode /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y"))))) /// matches `y.m()` and `(g()).m()`. /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m()`. /// cxxMemberCallExpr(on(callExpr())) /// matches `(g()).m()`. /// /// FIXME: Overload to allow directly matching types? AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>, InnerMatcher) { const Expr *ExprNode = Node.getImplicitObjectArgument() ->IgnoreParenImpCasts(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches on the receiver of an ObjectiveC Message expression. /// /// Example /// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *"))); /// matches the [webView ...] message invocation. /// \code /// NSString *webViewJavaScript = ... /// UIWebView *webView = ... /// [webView stringByEvaluatingJavaScriptFromString:webViewJavascript]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>, InnerMatcher) { const QualType TypeDecl = Node.getReceiverType(); return InnerMatcher.matches(TypeDecl, Finder, Builder); } /// Returns true when the Objective-C method declaration is a class method. /// /// Example /// matcher = objcMethodDecl(isClassMethod()) /// matches /// \code /// @interface I + (void)foo; @end /// \endcode /// but not /// \code /// @interface I - (void)bar; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isClassMethod) { return Node.isClassMethod(); } /// Returns true when the Objective-C method declaration is an instance method. 
/// /// Example /// matcher = objcMethodDecl(isInstanceMethod()) /// matches /// \code /// @interface I - (void)bar; @end /// \endcode /// but not /// \code /// @interface I + (void)foo; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isInstanceMethod) { return Node.isInstanceMethod(); } /// Returns true when the Objective-C message is sent to a class. /// /// Example /// matcher = objcMessageExpr(isClassMessage()) /// matches /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode /// but not /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isClassMessage) { return Node.isClassMessage(); } /// Returns true when the Objective-C message is sent to an instance. /// /// Example /// matcher = objcMessageExpr(isInstanceMessage()) /// matches /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// but not /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isInstanceMessage) { return Node.isInstanceMessage(); } /// Matches if the Objective-C message is sent to an instance, /// and the inner matcher matches on that instance. /// /// For example the method call in /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// is matched by /// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x")))))) AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>, InnerMatcher) { const Expr *ReceiverNode = Node.getInstanceReceiver(); return (ReceiverNode != nullptr && InnerMatcher.matches(*ReceiverNode->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches when BaseName == Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) { Selector Sel = Node.getSelector(); return BaseName.compare(Sel.getAsString()) == 0; } /// Matches when at least one of the supplied string equals to the /// Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:")); /// matches both of the expressions below: /// \code /// [myObj methodA:argA]; /// [myObj methodB:argB]; /// \endcode extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>, StringRef, internal::hasAnySelectorFunc> hasAnySelector; /// Matches ObjC selectors whose name contains /// a substring matched by the given RegExp. /// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, matchesSelector, std::string, RegExp) { assert(!RegExp.empty()); std::string SelectorString = Node.getSelector().getAsString(); llvm::Regex RE(RegExp); return RE.match(SelectorString); } /// Matches when the selector is the empty selector /// /// Matches only when the selector of the objCMessageExpr is NULL. This may /// represent an error condition in the tree! AST_MATCHER(ObjCMessageExpr, hasNullSelector) { return Node.getSelector().isNull(); } /// Matches when the selector is a Unary Selector /// /// matcher = objCMessageExpr(matchesSelector(hasUnarySelector()); /// matches self.bodyView in the code below, but NOT the outer message /// invocation of "loadHTMLString:baseURL:". 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER(ObjCMessageExpr, hasUnarySelector) { return Node.getSelector().isUnarySelector(); } /// Matches when the selector is a keyword selector /// /// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame /// message expression in /// /// \code /// UIWebView *webView = ...; /// CGRect bodyFrame = webView.frame; /// bodyFrame.size.height = self.bodyContentHeight; /// webView.frame = bodyFrame; /// // ^---- matches here /// \endcode AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) { return Node.getSelector().isKeywordSelector(); } /// Matches when the selector has the specified number of arguments /// /// matcher = objCMessageExpr(numSelectorArgs(0)); /// matches self.bodyView in the code below /// /// matcher = objCMessageExpr(numSelectorArgs(2)); /// matches the invocation of "loadHTMLString:baseURL:" but not that /// of self.bodyView /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) { return Node.getSelector().getNumArgs() == N; } /// Matches if the call expression's callee expression matches. /// /// Given /// \code /// class Y { void x() { this->x(); x(); Y y; y.x(); } }; /// void f() { f(); } /// \endcode /// callExpr(callee(expr())) /// matches this->x(), x(), y.x(), f() /// with callee(...) /// matching this->x, x, y.x, f respectively /// /// Note: Callee cannot take the more general internal::Matcher<Expr> /// because this introduces ambiguous overloads with calls to Callee taking a /// internal::Matcher<Decl>, as the matcher hierarchy is purely /// implemented in terms of implicit casts. AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>, InnerMatcher) { const Expr *ExprNode = Node.getCallee(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches if the call expression's callee's declaration matches the /// given matcher. 
/// /// Example matches y.x() (matcher = callExpr(callee( /// cxxMethodDecl(hasName("x"))))) /// \code /// class Y { public: void x(); }; /// void z() { Y y; y.x(); } /// \endcode AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher, 1) { return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder); } /// Matches if the expression's or declaration's type matches a type /// matcher. /// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and U (matcher = typedefDecl(hasType(asString("int"))) /// and friend class X (matcher = friendDecl(hasType("X")) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// typedef int U; /// class Y { friend class X; }; /// \endcode AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl, ValueDecl), internal::Matcher<QualType>, InnerMatcher, 0) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return InnerMatcher.matches(QT, Finder, Builder); return false; } /// Overloaded to match the declaration of the expression's or value /// declaration's type. /// /// In case of a value declaration (for example a variable declaration), /// this resolves one layer of indirection. For example, in the value /// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of /// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the /// declaration of x. 
/// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and friend class X (matcher = friendDecl(hasType("X")) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// class Y { friend class X; }; /// \endcode /// /// Usable as: Matcher<Expr>, Matcher<ValueDecl> AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl), internal::Matcher<Decl>, InnerMatcher, 1) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder); return false; } /// Matches if the type location of the declarator decl's type matches /// the inner matcher. /// /// Given /// \code /// int x; /// \endcode /// declaratorDecl(hasTypeLoc(loc(asString("int")))) /// matches int x AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) { if (!Node.getTypeSourceInfo()) // This happens for example for implicit destructors. return false; return Inner.matches(Node.getTypeSourceInfo()->getTypeLoc(), Finder, Builder); } /// Matches if the matched type is represented by the given string. /// /// Given /// \code /// class Y { public: void x(); }; /// void z() { Y* y; y->x(); } /// \endcode /// cxxMemberCallExpr(on(hasType(asString("class Y *")))) /// matches y->x() AST_MATCHER_P(QualType, asString, std::string, Name) { return Name == Node.getAsString(); } /// Matches if the matched type is a pointer type and the pointee type /// matches the specified matcher. 
/// /// Example matches y->x() /// (matcher = cxxMemberCallExpr(on(hasType(pointsTo /// cxxRecordDecl(hasName("Y"))))))) /// \code /// class Y { public: void x(); }; /// void z() { Y *y; y->x(); } /// \endcode AST_MATCHER_P( QualType, pointsTo, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isAnyPointerType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Overloaded to match the pointee type's declaration. AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>, InnerMatcher, 1) { return pointsTo(qualType(hasDeclaration(InnerMatcher))) .matches(Node, Finder, Builder); } /// Matches if the matched type matches the unqualified desugared /// type of the matched node. /// /// For example, in: /// \code /// class A {}; /// using B = A; /// \endcode /// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches /// both B and A. AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>, InnerMatcher) { return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder, Builder); } /// Matches if the matched type is a reference type and the referenced /// type matches the specified matcher. /// /// Example matches X &x and const X &y /// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X")))))) /// \code /// class X { /// void a(X b) { /// X &x = b; /// const X &y = b; /// } /// }; /// \endcode AST_MATCHER_P(QualType, references, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isReferenceType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Matches QualTypes whose canonical type matches InnerMatcher. /// /// Given: /// \code /// typedef int &int_ref; /// int a; /// int_ref b = a; /// \endcode /// /// \c varDecl(hasType(qualType(referenceType()))))) will not match the /// declaration of b but \c /// varDecl(hasType(qualType(hasCanonicalType(referenceType())))))) does. 
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  if (Node.isNull())
    return false;
  // Canonicalization strips typedefs/sugar before applying InnerMatcher.
  return InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}

/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  return references(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   Y g();
///   class X : public Y { void g(); };
///   void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
///   does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  // No IgnoreParenImpCasts here, by design: `on` is the stripping variant.
  const Expr *ExprNode = Node.getImplicitObjectArgument();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode, Finder, Builder));
}

/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   class X : public Y { void g(); };
///   void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("X")))))
///   matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  // The object argument may be the object itself (obj.m()) or a pointer to
  // it (ptr->m()); accept either form.
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}

/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}

/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
///     (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
///   bool x;
///   if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>,
              InnerMatcher) {
  const Decl *DeclNode = Node.getDecl();
  return (DeclNode != nullptr &&
          InnerMatcher.matches(*DeclNode, Finder, Builder));
}

/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
///   namespace a { void f() {} }
///   using a::f;
///   void g() {
///     f();     // Matches this ..
///     a::f();  // .. but not this.
///   }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
///   matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // getFoundDecl() is the declaration actually found by name lookup; it is
  // a UsingShadowDecl only when the reference goes through a using-decl.
  const NamedDecl *FoundDecl = Node.getFoundDecl();
  if (const UsingShadowDecl *UsingDecl = dyn_cast<UsingShadowDecl>(FoundDecl))
    return InnerMatcher.matches(*UsingDecl, Finder, Builder);
  return false;
}

/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
/// /// Given /// \code /// template <typename T> void foo(T); /// template <typename T> void bar(T); /// template <typename T> void baz(T t) { /// foo(t); /// bar(t); /// } /// \endcode /// unresolvedLookupExpr(hasAnyDeclaration( /// functionTemplateDecl(hasName("foo")))) /// matches \c foo in \c foo(t); but not \c bar in \c bar(t); AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(), Node.decls_end(), Finder, Builder); } /// Matches the Decl of a DeclStmt which has a single declaration. /// /// Given /// \code /// int a, b; /// int c; /// \endcode /// declStmt(hasSingleDecl(anything())) /// matches 'int c;' but not 'int a, b;'. AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) { if (Node.isSingleDecl()) { const Decl *FoundDecl = Node.getSingleDecl(); return InnerMatcher.matches(*FoundDecl, Finder, Builder); } return false; } /// Matches a variable declaration that has an initializer expression /// that matches the given matcher. /// /// Example matches x (matcher = varDecl(hasInitializer(callExpr()))) /// \code /// bool y() { return true; } /// bool x = y(); /// \endcode AST_MATCHER_P( VarDecl, hasInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getAnyInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// \brief Matches a static variable with local scope. /// /// Example matches y (matcher = varDecl(isStaticLocal())) /// \code /// void f() { /// int x; /// static int y; /// } /// static int z; /// \endcode AST_MATCHER(VarDecl, isStaticLocal) { return Node.isStaticLocal(); } /// Matches a variable declaration that has function scope and is a /// non-static local variable. 
/// /// Example matches x (matcher = varDecl(hasLocalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasLocalStorage) { return Node.hasLocalStorage(); } /// Matches a variable declaration that does not have local storage. /// /// Example matches y and z (matcher = varDecl(hasGlobalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasGlobalStorage) { return Node.hasGlobalStorage(); } /// Matches a variable declaration that has automatic storage duration. /// /// Example matches x, but not y, z, or a. /// (matcher = varDecl(hasAutomaticStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasAutomaticStorageDuration) { return Node.getStorageDuration() == SD_Automatic; } /// Matches a variable declaration that has static storage duration. /// It includes the variable declared at namespace scope and those declared /// with "static" and "extern" storage class specifiers. /// /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// static int b; /// extern int c; /// varDecl(hasStaticStorageDuration()) /// matches the function declaration y, a, b and c. /// \endcode AST_MATCHER(VarDecl, hasStaticStorageDuration) { return Node.getStorageDuration() == SD_Static; } /// Matches a variable declaration that has thread storage duration. /// /// Example matches z, but not x, z, or a. /// (matcher = varDecl(hasThreadStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasThreadStorageDuration) { return Node.getStorageDuration() == SD_Thread; } /// Matches a variable declaration that is an exception variable from /// a C++ catch block, or an Objective-C \@catch statement. 
/// /// Example matches x (matcher = varDecl(isExceptionVariable()) /// \code /// void f(int y) { /// try { /// } catch (int x) { /// } /// } /// \endcode AST_MATCHER(VarDecl, isExceptionVariable) { return Node.isExceptionVariable(); } /// Checks that a call expression or a constructor call expression has /// a specific number of arguments (including absent default arguments). /// /// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2))) /// \code /// void f(int x, int y); /// f(0, 0); /// \endcode AST_POLYMORPHIC_MATCHER_P(argumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr, ObjCMessageExpr), unsigned, N) { return Node.getNumArgs() == N; } /// Matches the n'th argument of a call expression or a constructor /// call expression. /// /// Example matches y in x(y) /// (matcher = callExpr(hasArgument(0, declRefExpr()))) /// \code /// void x(int) { int y; x(y); } /// \endcode AST_POLYMORPHIC_MATCHER_P2(hasArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr, ObjCMessageExpr), unsigned, N, internal::Matcher<Expr>, InnerMatcher) { return (N < Node.getNumArgs() && InnerMatcher.matches( *Node.getArg(N)->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches the n'th item of an initializer list expression. /// /// Example matches y. /// (matcher = initListExpr(hasInit(0, expr()))) /// \code /// int x{y}. /// \endcode AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { return N < Node.getNumInits() && InnerMatcher.matches(*Node.getInit(N), Finder, Builder); } /// Matches declaration statements that contain a specific number of /// declarations. /// /// Example: Given /// \code /// int a, b; /// int c; /// int d = 2, e; /// \endcode /// declCountIs(2) /// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'. 
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) { return std::distance(Node.decl_begin(), Node.decl_end()) == (ptrdiff_t)N; } /// Matches the n'th declaration of a declaration statement. /// /// Note that this does not work for global declarations because the AST /// breaks up multiple-declaration DeclStmt's into multiple single-declaration /// DeclStmt's. /// Example: Given non-global declarations /// \code /// int a, b = 0; /// int c; /// int d = 2, e; /// \endcode /// declStmt(containsDeclaration( /// 0, varDecl(hasInitializer(anything())))) /// matches only 'int d = 2, e;', and /// declStmt(containsDeclaration(1, varDecl())) /// \code /// matches 'int a, b = 0' as well as 'int d = 2, e;' /// but 'int c;' is not matched. /// \endcode AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N, internal::Matcher<Decl>, InnerMatcher) { const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end()); if (N >= NumDecls) return false; DeclStmt::const_decl_iterator Iterator = Node.decl_begin(); std::advance(Iterator, N); return InnerMatcher.matches(**Iterator, Finder, Builder); } /// Matches a C++ catch statement that has a catch-all handler. /// /// Given /// \code /// try { /// // ... /// } catch (int) { /// // ... /// } catch (...) { /// // ... /// } /// \endcode /// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int). AST_MATCHER(CXXCatchStmt, isCatchAll) { return Node.getExceptionDecl() == nullptr; } /// Matches a constructor initializer. 
/// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl( /// hasAnyConstructorInitializer(anything()) /// ))) /// record matches Foo, hasAnyConstructorInitializer matches foo_(1) AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(), Node.init_end(), Finder, Builder); } /// Matches the field declaration of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// forField(hasName("foo_")))))) /// matches Foo /// with forField matching foo_ AST_MATCHER_P(CXXCtorInitializer, forField, internal::Matcher<FieldDecl>, InnerMatcher) { const FieldDecl *NodeAsDecl = Node.getAnyMember(); return (NodeAsDecl != nullptr && InnerMatcher.matches(*NodeAsDecl, Finder, Builder)); } /// Matches the initializer expression of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// withInitializer(integerLiteral(equals(1))))))) /// matches Foo /// with withInitializer matching (1) AST_MATCHER_P(CXXCtorInitializer, withInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr* NodeAsExpr = Node.getInit(); return (NodeAsExpr != nullptr && InnerMatcher.matches(*NodeAsExpr, Finder, Builder)); } /// Matches a constructor initializer if it is explicitly written in /// code (as opposed to implicitly added by the compiler). 
/// /// Given /// \code /// struct Foo { /// Foo() { } /// Foo(int) : foo_("A") { } /// string foo_; /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten())) /// will match Foo(int), but not Foo() AST_MATCHER(CXXCtorInitializer, isWritten) { return Node.isWritten(); } /// Matches a constructor initializer if it is initializing a base, as /// opposed to a member. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer())) /// will match E(), but not match D(int). AST_MATCHER(CXXCtorInitializer, isBaseInitializer) { return Node.isBaseInitializer(); } /// Matches a constructor initializer if it is initializing a member, as /// opposed to a base. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer())) /// will match D(int), but not match E(). AST_MATCHER(CXXCtorInitializer, isMemberInitializer) { return Node.isMemberInitializer(); } /// Matches any argument of a call expression or a constructor call /// expression, or an ObjC-message-send expression. /// /// Given /// \code /// void x(int, int, int) { int y; x(1, y, 42); } /// \endcode /// callExpr(hasAnyArgument(declRefExpr())) /// matches x(1, y, 42) /// with hasAnyArgument(...) 
/// matching y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// void foo(I *i) { [i f:12]; } /// \endcode /// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12)))) /// matches [i f:12] AST_POLYMORPHIC_MATCHER_P(hasAnyArgument, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), internal::Matcher<Expr>, InnerMatcher) { for (const Expr *Arg : Node.arguments()) { BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Arg, Finder, &Result)) { *Builder = std::move(Result); return true; } } return false; } /// Matches a constructor call expression which uses list initialization. AST_MATCHER(CXXConstructExpr, isListInitialization) { return Node.isListInitialization(); } /// Matches a constructor call expression which requires /// zero initialization. /// /// Given /// \code /// void foo() { /// struct point { double x; double y; }; /// point pt[2] = { { 1.0, 2.0 } }; /// } /// \endcode /// initListExpr(has(cxxConstructExpr(requiresZeroInitialization())) /// will match the implicit array filler for pt[1]. AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) { return Node.requiresZeroInitialization(); } /// Matches the n'th parameter of a function or an ObjC method /// declaration or a block. /// /// Given /// \code /// class X { void f(int x) {} }; /// \endcode /// cxxMethodDecl(hasParameter(0, hasType(varDecl()))) /// matches f(int x) {} /// with hasParameter(...) /// matching int x /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasParameter(0, hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. 
AST_POLYMORPHIC_MATCHER_P2(hasParameter, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, ObjCMethodDecl, BlockDecl), unsigned, N, internal::Matcher<ParmVarDecl>, InnerMatcher) { return (N < Node.parameters().size() && InnerMatcher.matches(*Node.parameters()[N], Finder, Builder)); } /// Matches all arguments and their respective ParmVarDecl. /// /// Given /// \code /// void f(int i); /// int y; /// f(y); /// \endcode /// callExpr( /// forEachArgumentWithParam( /// declRefExpr(to(varDecl(hasName("y")))), /// parmVarDecl(hasType(isInteger())) /// )) /// matches f(y); /// with declRefExpr(...) /// matching int y /// and parmVarDecl(...) /// matching int i AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr), internal::Matcher<Expr>, ArgMatcher, internal::Matcher<ParmVarDecl>, ParamMatcher) { BoundNodesTreeBuilder Result; // The first argument of an overloaded member operator is the implicit object // argument of the method which should not be matched against a parameter, so // we skip over it here. BoundNodesTreeBuilder Matches; unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl())) .matches(Node, Finder, &Matches) ? 1 : 0; int ParamIndex = 0; bool Matched = false; for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) { BoundNodesTreeBuilder ArgMatches(*Builder); if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()), Finder, &ArgMatches)) { BoundNodesTreeBuilder ParamMatches(ArgMatches); if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl( hasParameter(ParamIndex, ParamMatcher)))), callExpr(callee(functionDecl( hasParameter(ParamIndex, ParamMatcher)))))) .matches(Node, Finder, &ParamMatches)) { Result.addMatch(ParamMatches); Matched = true; } } ++ParamIndex; } *Builder = std::move(Result); return Matched; } /// Matches any parameter of a function or an ObjC method declaration or a /// block. /// /// Does not match the 'this' parameter of a method. 
/// /// Given /// \code /// class X { void f(int x, int y, int z) {} }; /// \endcode /// cxxMethodDecl(hasAnyParameter(hasName("y"))) /// matches f(int x, int y, int z) {} /// with hasAnyParameter(...) /// matching int y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. /// /// For blocks, given /// \code /// b = ^(int y) { printf("%d", y) }; /// \endcode /// /// the matcher blockDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of the block b with hasParameter /// matching y. AST_POLYMORPHIC_MATCHER_P(hasAnyParameter, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, ObjCMethodDecl, BlockDecl), internal::Matcher<ParmVarDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(), Node.param_end(), Finder, Builder); } /// Matches \c FunctionDecls and \c FunctionProtoTypes that have a /// specific parameter count. /// /// Given /// \code /// void f(int i) {} /// void g(int i, int j) {} /// void h(int i, int j); /// void j(int i); /// void k(int x, int y, int z, ...); /// \endcode /// functionDecl(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(3)) /// matches \c k AST_POLYMORPHIC_MATCHER_P(parameterCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType), unsigned, N) { return Node.getNumParams() == N; } /// Matches \c FunctionDecls that have a noreturn attribute. 
/// /// Given /// \code /// void nope(); /// [[noreturn]] void a(); /// __attribute__((noreturn)) void b(); /// struct c { [[noreturn]] c(); }; /// \endcode /// functionDecl(isNoReturn()) /// matches all of those except /// \code /// void nope(); /// \endcode AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); } /// Matches the return type of a function declaration. /// /// Given: /// \code /// class X { int f() { return 1; } }; /// \endcode /// cxxMethodDecl(returns(asString("int"))) /// matches int f() { return 1; } AST_MATCHER_P(FunctionDecl, returns, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getReturnType(), Finder, Builder); } /// Matches extern "C" function or variable declarations. /// /// Given: /// \code /// extern "C" void f() {} /// extern "C" { void g() {} } /// void h() {} /// extern "C" int x = 1; /// extern "C" int y = 2; /// int z = 3; /// \endcode /// functionDecl(isExternC()) /// matches the declaration of f and g, but not the declaration of h. /// varDecl(isExternC()) /// matches the declaration of x and y, but not the declaration of z. AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.isExternC(); } /// Matches variable/function declarations that have "static" storage /// class specifier ("static" keyword) written in the source. /// /// Given: /// \code /// static void f() {} /// static int i = 0; /// extern int j; /// int k; /// \endcode /// functionDecl(isStaticStorageClass()) /// matches the function declaration f. /// varDecl(isStaticStorageClass()) /// matches the variable declaration i. AST_POLYMORPHIC_MATCHER(isStaticStorageClass, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.getStorageClass() == SC_Static; } /// Matches deleted function declarations. 
/// /// Given: /// \code /// void Func(); /// void DeletedFunc() = delete; /// \endcode /// functionDecl(isDeleted()) /// matches the declaration of DeletedFunc, but not Func. AST_MATCHER(FunctionDecl, isDeleted) { return Node.isDeleted(); } /// Matches defaulted function declarations. /// /// Given: /// \code /// class A { ~A(); }; /// class B { ~B() = default; }; /// \endcode /// functionDecl(isDefaulted()) /// matches the declaration of ~B, but not ~A. AST_MATCHER(FunctionDecl, isDefaulted) { return Node.isDefaulted(); } /// Matches functions that have a dynamic exception specification. /// /// Given: /// \code /// void f(); /// void g() noexcept; /// void h() noexcept(true); /// void i() noexcept(false); /// void j() throw(); /// void k() throw(int); /// void l() throw(...); /// \endcode /// functionDecl(hasDynamicExceptionSpec()) and /// functionProtoType(hasDynamicExceptionSpec()) /// match the declarations of j, k, and l, but not f, g, h, or i. AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType)) { if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node)) return FnTy->hasDynamicExceptionSpec(); return false; } /// Matches functions that have a non-throwing exception specification. /// /// Given: /// \code /// void f(); /// void g() noexcept; /// void h() throw(); /// void i() throw(int); /// void j() noexcept(false); /// \endcode /// functionDecl(isNoThrow()) and functionProtoType(isNoThrow()) /// match the declarations of g, and h, but not f, i or j. AST_POLYMORPHIC_MATCHER(isNoThrow, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType)) { const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node); // If the function does not have a prototype, then it is assumed to be a // throwing function (as it would if the function did not have any exception // specification). if (!FnTy) return false; // Assume the best for any unresolved exception specification. 
if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType())) return true; return FnTy->isNothrow(); } /// Matches constexpr variable and function declarations, /// and if constexpr. /// /// Given: /// \code /// constexpr int foo = 42; /// constexpr int bar(); /// void baz() { if constexpr(1 > 0) {} } /// \endcode /// varDecl(isConstexpr()) /// matches the declaration of foo. /// functionDecl(isConstexpr()) /// matches the declaration of bar. /// ifStmt(isConstexpr()) /// matches the if statement in baz. AST_POLYMORPHIC_MATCHER(isConstexpr, AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl, FunctionDecl, IfStmt)) { return Node.isConstexpr(); } /// Matches the condition expression of an if statement, for loop, /// switch statement or conditional operator. /// /// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true)))) /// \code /// if (true) {} /// \endcode AST_POLYMORPHIC_MATCHER_P( hasCondition, AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt, SwitchStmt, AbstractConditionalOperator), internal::Matcher<Expr>, InnerMatcher) { const Expr *const Condition = Node.getCond(); return (Condition != nullptr && InnerMatcher.matches(*Condition, Finder, Builder)); } /// Matches the then-statement of an if statement. /// /// Examples matches the if statement /// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true))))) /// \code /// if (false) true; else false; /// \endcode AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Then = Node.getThen(); return (Then != nullptr && InnerMatcher.matches(*Then, Finder, Builder)); } /// Matches the else-statement of an if statement. 
/// /// Examples matches the if statement /// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true))))) /// \code /// if (false) false; else true; /// \endcode AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Else = Node.getElse(); return (Else != nullptr && InnerMatcher.matches(*Else, Finder, Builder)); } /// Matches if a node equals a previously bound node. /// /// Matches a node if it equals the node previously bound to \p ID. /// /// Given /// \code /// class X { int a; int b; }; /// \endcode /// cxxRecordDecl( /// has(fieldDecl(hasName("a"), hasType(type().bind("t")))), /// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t")))))) /// matches the class \c X, as \c a and \c b have the same type. /// /// Note that when multiple matches are involved via \c forEach* matchers, /// \c equalsBoundNodes acts as a filter. /// For example: /// compoundStmt( /// forEachDescendant(varDecl().bind("d")), /// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d")))))) /// will trigger a match for each combination of variable declaration /// and reference to that variable declaration within a compound statement. AST_POLYMORPHIC_MATCHER_P(equalsBoundNode, AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type, QualType), std::string, ID) { // FIXME: Figure out whether it makes sense to allow this // on any other node types. // For *Loc it probably does not make sense, as those seem // unique. For NestedNameSepcifier it might make sense, as // those also have pointer identity, but I'm not sure whether // they're ever reused. internal::NotEqualsBoundNodePredicate Predicate; Predicate.ID = ID; Predicate.Node = ast_type_traits::DynTypedNode::create(Node); return Builder->removeBindings(Predicate); } /// Matches the condition variable statement in an if statement. /// /// Given /// \code /// if (A* a = GetAPointer()) {} /// \endcode /// hasConditionVariableStatement(...) /// matches 'A* a = GetAPointer()'. 
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
              internal::Matcher<DeclStmt>, InnerMatcher) {
  const DeclStmt* const DeclarationStatement =
    Node.getConditionVariableDeclStmt();
  return DeclarationStatement != nullptr &&
         InnerMatcher.matches(*DeclarationStatement, Finder, Builder);
}

/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
///   int i[5];
///   void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasIndex(integerLiteral()))
///   matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr* Expression = Node.getIdx())
    return InnerMatcher.matches(*Expression, Finder, Builder);
  return false;
}

/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
///   int i[5];
///   void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasBase(implicitCastExpr(
///     hasSourceExpression(declRefExpr()))))
///   matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr* Expression = Node.getBase())
    return InnerMatcher.matches(*Expression, Finder, Builder);
  return false;
}

/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body.
///
/// Given
/// \code
///   for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
///   matches 'for (;;) {}'
/// with compoundStmt()
///   matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasBody,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
                                                          WhileStmt,
                                                          CXXForRangeStmt,
                                                          FunctionDecl),
                          internal::Matcher<Stmt>, InnerMatcher) {
  const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node);
  return (Statement != nullptr &&
          InnerMatcher.matches(*Statement, Finder, Builder));
}

/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
/// /// Given /// \code /// { {}; 1+2; } /// \endcode /// hasAnySubstatement(compoundStmt()) /// matches '{ {}; 1+2; }' /// with compoundStmt() /// matching '{}' AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement, AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt, StmtExpr), internal::Matcher<Stmt>, InnerMatcher) { const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node); return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(), CS->body_end(), Finder, Builder); } /// Checks that a compound statement contains a specific number of /// child statements. /// /// Example: Given /// \code /// { for (;;) {} } /// \endcode /// compoundStmt(statementCountIs(0))) /// matches '{}' /// but does not match the outer compound statement. AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) { return Node.size() == N; } /// Matches literals that are equal to the given value of type ValueT. /// /// Given /// \code /// f('\0', false, 3.14, 42); /// \endcode /// characterLiteral(equals(0)) /// matches '\0' /// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0)) /// match false /// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2)) /// match 3.14 /// integerLiteral(equals(42)) /// matches 42 /// /// Note that you cannot directly match a negative numeric literal because the /// minus sign is not part of the literal: It is a unary operator whose operand /// is the positive numeric literal. 
Instead, you must use a unaryOperator() /// matcher to match the minus sign: /// /// unaryOperator(hasOperatorName("-"), /// hasUnaryOperand(integerLiteral(equals(13)))) /// /// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>, /// Matcher<FloatingLiteral>, Matcher<IntegerLiteral> template <typename ValueT> internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT> equals(const ValueT &Value) { return internal::PolymorphicMatcherWithParam1< internal::ValueEqualsMatcher, ValueT>(Value); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, IntegerLiteral), bool, Value, 0) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, IntegerLiteral), unsigned, Value, 1) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, FloatingLiteral, IntegerLiteral), double, Value, 2) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } /// Matches the operator Name of operator expressions (binary or /// unary). /// /// Example matches a || b (matcher = binaryOperator(hasOperatorName("||"))) /// \code /// !(a || b) /// \endcode AST_POLYMORPHIC_MATCHER_P(hasOperatorName, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, UnaryOperator), std::string, Name) { return Name == Node.getOpcodeStr(Node.getOpcode()); } /// Matches all kinds of assignment operators. 
/// /// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator())) /// \code /// if (a == b) /// a += b; /// \endcode /// /// Example 2: matches s1 = s2 /// (matcher = cxxOperatorCallExpr(isAssignmentOperator())) /// \code /// struct S { S& operator=(const S&); }; /// void x() { S s1, s2; s1 = s2; }) /// \endcode AST_POLYMORPHIC_MATCHER(isAssignmentOperator, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr)) { return Node.isAssignmentOp(); } /// Matches the left hand side of binary operator expressions. /// /// Example matches a (matcher = binaryOperator(hasLHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasLHS, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *LeftHandSide = Node.getLHS(); return (LeftHandSide != nullptr && InnerMatcher.matches(*LeftHandSide, Finder, Builder)); } /// Matches the right hand side of binary operator expressions. /// /// Example matches b (matcher = binaryOperator(hasRHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasRHS, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *RightHandSide = Node.getRHS(); return (RightHandSide != nullptr && InnerMatcher.matches(*RightHandSide, Finder, Builder)); } /// Matches if either the left hand side or the right hand side of a /// binary operator matches. inline internal::Matcher<BinaryOperator> hasEitherOperand( const internal::Matcher<Expr> &InnerMatcher) { return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher)); } /// Matches if the operand of a unary operator matches. 
/// /// Example matches true (matcher = hasUnaryOperand( /// cxxBoolLiteral(equals(true)))) /// \code /// !true /// \endcode AST_MATCHER_P(UnaryOperator, hasUnaryOperand, internal::Matcher<Expr>, InnerMatcher) { const Expr * const Operand = Node.getSubExpr(); return (Operand != nullptr && InnerMatcher.matches(*Operand, Finder, Builder)); } /// Matches if the cast's source expression /// or opaque value's source expression matches the given matcher. /// /// Example 1: matches "a string" /// (matcher = castExpr(hasSourceExpression(cxxConstructExpr()))) /// \code /// class URL { URL(string); }; /// URL url = "a string"; /// \endcode /// /// Example 2: matches 'b' (matcher = /// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr()))) /// \code /// int a = b ?: 1; /// \endcode AST_POLYMORPHIC_MATCHER_P(hasSourceExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr, OpaqueValueExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *const SubExpression = internal::GetSourceExpressionMatcher<NodeType>::get(Node); return (SubExpression != nullptr && InnerMatcher.matches(*SubExpression, Finder, Builder)); } /// Matches casts that has a given cast kind. /// /// Example: matches the implicit cast around \c 0 /// (matcher = castExpr(hasCastKind(CK_NullToPointer))) /// \code /// int *p = 0; /// \endcode /// /// If the matcher is use from clang-query, CastKind parameter /// should be passed as a quoted string. e.g., ofKind("CK_NullToPointer"). AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) { return Node.getCastKind() == Kind; } /// Matches casts whose destination type matches a given matcher. /// /// (Note: Clang's AST refers to other conversions as "casts" too, and calls /// actual casts "explicit" casts.) 
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType, internal::Matcher<QualType>, InnerMatcher) { const QualType NodeType = Node.getTypeAsWritten(); return InnerMatcher.matches(NodeType, Finder, Builder); } /// Matches implicit casts whose destination type matches a given /// matcher. /// /// FIXME: Unit test this matcher AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getType(), Finder, Builder); } /// Matches RecordDecl object that are spelled with "struct." /// /// Example matches S, but not C or U. /// \code /// struct S {}; /// class C {}; /// union U {}; /// \endcode AST_MATCHER(RecordDecl, isStruct) { return Node.isStruct(); } /// Matches RecordDecl object that are spelled with "union." /// /// Example matches U, but not C or S. /// \code /// struct S {}; /// class C {}; /// union U {}; /// \endcode AST_MATCHER(RecordDecl, isUnion) { return Node.isUnion(); } /// Matches RecordDecl object that are spelled with "class." /// /// Example matches C, but not S or U. /// \code /// struct S {}; /// class C {}; /// union U {}; /// \endcode AST_MATCHER(RecordDecl, isClass) { return Node.isClass(); } /// Matches the true branch expression of a conditional operator. /// /// Example 1 (conditional ternary operator): matches a /// \code /// condition ? a : b /// \endcode /// /// Example 2 (conditional binary operator): matches opaqueValueExpr(condition) /// \code /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getTrueExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches the false branch expression of a conditional operator /// (binary or ternary). /// /// Example matches b /// \code /// condition ? 
a : b /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getFalseExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches if a declaration has a body attached. /// /// Example matches A, va, fa /// \code /// class A {}; /// class B; // Doesn't match, as it has no body. /// int va; /// extern int vb; // Doesn't match, as it doesn't define the variable. /// void fa() {} /// void fb(); // Doesn't match, as it has no body. /// @interface X /// - (void)ma; // Doesn't match, interface is declaration. /// @end /// @implementation X /// - (void)ma {} /// @end /// \endcode /// /// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>, /// Matcher<ObjCMethodDecl> AST_POLYMORPHIC_MATCHER(isDefinition, AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl, ObjCMethodDecl, FunctionDecl)) { return Node.isThisDeclarationADefinition(); } /// Matches if a function declaration is variadic. /// /// Example matches f, but not g or h. The function i will not match, even when /// compiled in C mode. /// \code /// void f(...); /// void g(int); /// template <typename... Ts> void h(Ts...); /// void i(); /// \endcode AST_MATCHER(FunctionDecl, isVariadic) { return Node.isVariadic(); } /// Matches the class declaration that the given method declaration /// belongs to. /// /// FIXME: Generalize this for other kinds of declarations. /// FIXME: What other kind of declarations would we need to generalize /// this to? 
/// /// Example matches A() in the last line /// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl( /// ofClass(hasName("A")))))) /// \code /// class A { /// public: /// A(); /// }; /// A a = A(); /// \endcode AST_MATCHER_P(CXXMethodDecl, ofClass, internal::Matcher<CXXRecordDecl>, InnerMatcher) { const CXXRecordDecl *Parent = Node.getParent(); return (Parent != nullptr && InnerMatcher.matches(*Parent, Finder, Builder)); } /// Matches each method overridden by the given method. This matcher may /// produce multiple matches. /// /// Given /// \code /// class A { virtual void f(); }; /// class B : public A { void f(); }; /// class C : public B { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note /// that B::f is not overridden by C::f). /// /// The check can produce multiple matches in case of multiple inheritance, e.g. /// \code /// class A1 { virtual void f(); }; /// class A2 { virtual void f(); }; /// class C : public A1, public A2 { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and /// once with "b" binding "A2::f" and "d" binding "C::f". AST_MATCHER_P(CXXMethodDecl, forEachOverridden, internal::Matcher<CXXMethodDecl>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *Overridden : Node.overridden_methods()) { BoundNodesTreeBuilder OverriddenBuilder(*Builder); const bool OverriddenMatched = InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder); if (OverriddenMatched) { Matched = true; Result.addMatch(OverriddenBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches if the given method declaration is virtual. 
/// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// \endcode /// matches A::x AST_MATCHER(CXXMethodDecl, isVirtual) { return Node.isVirtual(); } /// Matches if the given method declaration has an explicit "virtual". /// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// void x(); /// }; /// \endcode /// matches A::x but not B::x AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) { return Node.isVirtualAsWritten(); } /// Matches if the given method or class declaration is final. /// /// Given: /// \code /// class A final {}; /// /// struct B { /// virtual void f(); /// }; /// /// struct C : B { /// void f() final; /// }; /// \endcode /// matches A and C::f, but not B, C, or B::f AST_POLYMORPHIC_MATCHER(isFinal, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, CXXMethodDecl)) { return Node.template hasAttr<FinalAttr>(); } /// Matches if the given method declaration is pure. /// /// Given /// \code /// class A { /// public: /// virtual void x() = 0; /// }; /// \endcode /// matches A::x AST_MATCHER(CXXMethodDecl, isPure) { return Node.isPure(); } /// Matches if the given method declaration is const. /// /// Given /// \code /// struct A { /// void foo() const; /// void bar(); /// }; /// \endcode /// /// cxxMethodDecl(isConst()) matches A::foo() but not A::bar() AST_MATCHER(CXXMethodDecl, isConst) { return Node.isConst(); } /// Matches if the given method declaration declares a copy assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not /// the second one. AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) { return Node.isCopyAssignmentOperator(); } /// Matches if the given method declaration declares a move assignment /// operator. 
/// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not /// the first one. AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) { return Node.isMoveAssignmentOperator(); } /// Matches if the given method declaration overrides another method. /// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// virtual void x(); /// }; /// \endcode /// matches B::x AST_MATCHER(CXXMethodDecl, isOverride) { return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>(); } /// Matches method declarations that are user-provided. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &) = default; // #2 /// S(S &&) = delete; // #3 /// }; /// \endcode /// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3. AST_MATCHER(CXXMethodDecl, isUserProvided) { return Node.isUserProvided(); } /// Matches member expressions that are called with '->' as opposed /// to '.'. /// /// Member calls on the implicit this pointer match as called with '->'. /// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// template <class T> void f() { this->f<T>(); f<T>(); } /// int a; /// static int b; /// }; /// template <class T> /// class Z { /// void x() { this->m; } /// }; /// \endcode /// memberExpr(isArrow()) /// matches this->x, x, y.x, a, this->b /// cxxDependentScopeMemberExpr(isArrow()) /// matches this->m /// unresolvedMemberExpr(isArrow()) /// matches this->f<T>, f<T> AST_POLYMORPHIC_MATCHER( isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr, CXXDependentScopeMemberExpr)) { return Node.isArrow(); } /// Matches QualType nodes that are of integer type. 
/// /// Given /// \code /// void a(int); /// void b(long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isInteger()))) /// matches "a(int)", "b(long)", but not "c(double)". AST_MATCHER(QualType, isInteger) { return Node->isIntegerType(); } /// Matches QualType nodes that are of unsigned integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isUnsignedInteger()))) /// matches "b(unsigned long)", but not "a(int)" and "c(double)". AST_MATCHER(QualType, isUnsignedInteger) { return Node->isUnsignedIntegerType(); } /// Matches QualType nodes that are of signed integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isSignedInteger()))) /// matches "a(int)", but not "b(unsigned long)" and "c(double)". AST_MATCHER(QualType, isSignedInteger) { return Node->isSignedIntegerType(); } /// Matches QualType nodes that are of character type. /// /// Given /// \code /// void a(char); /// void b(wchar_t); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isAnyCharacter()))) /// matches "a(char)", "b(wchar_t)", but not "c(double)". AST_MATCHER(QualType, isAnyCharacter) { return Node->isAnyCharacterType(); } /// Matches QualType nodes that are of any pointer type; this includes /// the Objective-C object pointer type, which is different despite being /// syntactically similar. /// /// Given /// \code /// int *i = nullptr; /// /// @interface Foo /// @end /// Foo *f; /// /// int j; /// \endcode /// varDecl(hasType(isAnyPointer())) /// matches "int *i" and "Foo *f", but not "int j". AST_MATCHER(QualType, isAnyPointer) { return Node->isAnyPointerType(); } /// Matches QualType nodes that are const-qualified, i.e., that /// include "top-level" const. 
/// /// Given /// \code /// void a(int); /// void b(int const); /// void c(const int); /// void d(const int*); /// void e(int const) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isConstQualified()))) /// matches "void b(int const)", "void c(const int)" and /// "void e(int const) {}". It does not match d as there /// is no top-level const on the parameter type "const int *". AST_MATCHER(QualType, isConstQualified) { return Node.isConstQualified(); } /// Matches QualType nodes that are volatile-qualified, i.e., that /// include "top-level" volatile. /// /// Given /// \code /// void a(int); /// void b(int volatile); /// void c(volatile int); /// void d(volatile int*); /// void e(int volatile) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isVolatileQualified()))) /// matches "void b(int volatile)", "void c(volatile int)" and /// "void e(int volatile) {}". It does not match d as there /// is no top-level volatile on the parameter type "volatile int *". AST_MATCHER(QualType, isVolatileQualified) { return Node.isVolatileQualified(); } /// Matches QualType nodes that have local CV-qualifiers attached to /// the node, not hidden within a typedef. /// /// Given /// \code /// typedef const int const_int; /// const_int i; /// int *const j; /// int *volatile k; /// int m; /// \endcode /// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k. /// \c i is const-qualified but the qualifier is not local. AST_MATCHER(QualType, hasLocalQualifiers) { return Node.hasLocalQualifiers(); } /// Matches a member expression where the member is matched by a /// given matcher. /// /// Given /// \code /// struct { int first, second; } first, second; /// int i(second.first); /// int j(first.second); /// \endcode /// memberExpr(member(hasName("first"))) /// matches second.first /// but not first.second (because the member name there is "second"). 
AST_MATCHER_P(MemberExpr, member,
              internal::Matcher<ValueDecl>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}

/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
///   struct X {
///     int m;
///     int f(X x) { x.m; return m; }
///   };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
///   matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
///     cxxRecordDecl(hasName("X"))))))
///   matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
    hasObjectExpression,
    AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                    CXXDependentScopeMemberExpr),
    internal::Matcher<Expr>, InnerMatcher) {
  // Implicit accesses have no written object expression to match against,
  // so they are rejected for the unresolved/dependent node kinds.
  if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}

/// Matches any using shadow declaration.
///
/// Given
/// \code
///   namespace X { void b(); }
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b")))
///   matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
                                    Node.shadow_end(), Finder, Builder);
}

/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
/// /// Given /// \code /// namespace X { int a; void b(); } /// using X::a; /// using X::b; /// \endcode /// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl()))) /// matches \code using X::b \endcode /// but not \code using X::a \endcode AST_MATCHER_P(UsingShadowDecl, hasTargetDecl, internal::Matcher<NamedDecl>, InnerMatcher) { return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder); } /// Matches template instantiations of function, class, or static /// member variable template instantiations. /// /// Given /// \code /// template <typename T> class X {}; class A {}; X<A> x; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; template class X<A>; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; extern template class X<A>; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// matches the template instantiation of X<A>. /// /// But given /// \code /// template <typename T> class X {}; class A {}; /// template <> class X<A> {}; X<A> x; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// does not match, as X<A> is an explicit template specialization. /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isTemplateInstantiation, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ImplicitInstantiation || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDeclaration); } /// Matches declarations that are template instantiations or are inside /// template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { T i; } /// A(0); /// A(0U); /// \endcode /// functionDecl(isInstantiated()) /// matches 'A(int) {...};' and 'A(unsigned) {...}'. 
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) { auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))); return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation))); } /// Matches statements inside of a template instantiation. /// /// Given /// \code /// int j; /// template<typename T> void A(T t) { T i; j += 42;} /// A(0); /// A(0U); /// \endcode /// declStmt(isInTemplateInstantiation()) /// matches 'int i;' and 'unsigned i'. /// unless(stmt(isInTemplateInstantiation())) /// will NOT match j += 42; as it's shared between the template definition and /// instantiation. AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) { return stmt( hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))))); } /// Matches explicit template specializations of function, class, or /// static member variable template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { } /// template<> void A(int N) { } /// \endcode /// functionDecl(isExplicitTemplateSpecialization()) /// matches the specialization A<int>(). /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization); } /// Matches \c TypeLocs for which the given inner /// QualType-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc, internal::Matcher<QualType>, InnerMatcher, 0) { return internal::BindableMatcher<TypeLoc>( new internal::TypeLocTypeMatcher(InnerMatcher)); } /// Matches type \c bool. 
/// /// Given /// \code /// struct S { bool func(); }; /// \endcode /// functionDecl(returns(booleanType())) /// matches "bool func();" AST_MATCHER(Type, booleanType) { return Node.isBooleanType(); } /// Matches type \c void. /// /// Given /// \code /// struct S { void func(); }; /// \endcode /// functionDecl(returns(voidType())) /// matches "void func();" AST_MATCHER(Type, voidType) { return Node.isVoidType(); } template <typename NodeType> using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>; /// Matches builtin Types. /// /// Given /// \code /// struct A {}; /// A a; /// int b; /// float c; /// bool d; /// \endcode /// builtinType() /// matches "int b", "float c" and "bool d" extern const AstTypeMatcher<BuiltinType> builtinType; /// Matches all kinds of arrays. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[4]; /// void f() { int c[a[0]]; } /// \endcode /// arrayType() /// matches "int a[]", "int b[4]" and "int c[a[0]]"; extern const AstTypeMatcher<ArrayType> arrayType; /// Matches C99 complex types. /// /// Given /// \code /// _Complex float f; /// \endcode /// complexType() /// matches "_Complex float f" extern const AstTypeMatcher<ComplexType> complexType; /// Matches any real floating-point type (float, double, long double). /// /// Given /// \code /// int i; /// float f; /// \endcode /// realFloatingPointType() /// matches "float f" but not "int i" AST_MATCHER(Type, realFloatingPointType) { return Node.isRealFloatingType(); } /// Matches arrays and C99 complex types that have a specific element /// type. /// /// Given /// \code /// struct A {}; /// A a[7]; /// int b[7]; /// \endcode /// arrayType(hasElementType(builtinType())) /// matches "int b[7]" /// /// Usable as: Matcher<ArrayType>, Matcher<ComplexType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement, AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType, ComplexType)); /// Matches C arrays with a specified constant size. 
/// /// Given /// \code /// void() { /// int a[2]; /// int b[] = { 2, 3 }; /// int c[b[0]]; /// } /// \endcode /// constantArrayType() /// matches "int a[2]" extern const AstTypeMatcher<ConstantArrayType> constantArrayType; /// Matches nodes that have the specified size. /// /// Given /// \code /// int a[42]; /// int b[2 * 21]; /// int c[41], d[43]; /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// char *w = "a"; /// \endcode /// constantArrayType(hasSize(42)) /// matches "int a[42]" and "int b[2 * 21]" /// stringLiteral(hasSize(4)) /// matches "abcd", L"abcd" AST_POLYMORPHIC_MATCHER_P(hasSize, AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType, StringLiteral), unsigned, N) { return internal::HasSizeMatcher<NodeType>::hasSize(Node, N); } /// Matches C++ arrays whose size is a value-dependent expression. /// /// Given /// \code /// template<typename T, int Size> /// class array { /// T data[Size]; /// }; /// \endcode /// dependentSizedArrayType /// matches "T data[Size]" extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType; /// Matches C arrays with unspecified size. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[42]; /// void f(int c[]) { int d[a[0]]; }; /// \endcode /// incompleteArrayType() /// matches "int a[]" and "int c[]" extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType; /// Matches C arrays with a specified size that is not an /// integer-constant-expression. /// /// Given /// \code /// void f() { /// int a[] = { 2, 3 } /// int b[42]; /// int c[a[0]]; /// } /// \endcode /// variableArrayType() /// matches "int c[a[0]]" extern const AstTypeMatcher<VariableArrayType> variableArrayType; /// Matches \c VariableArrayType nodes that have a specific size /// expression. 
/// /// Given /// \code /// void f(int b) { /// int a[b]; /// } /// \endcode /// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to( /// varDecl(hasName("b"))))))) /// matches "int a[b]" AST_MATCHER_P(VariableArrayType, hasSizeExpr, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder); } /// Matches atomic types. /// /// Given /// \code /// _Atomic(int) i; /// \endcode /// atomicType() /// matches "_Atomic(int) i" extern const AstTypeMatcher<AtomicType> atomicType; /// Matches atomic types with a specific value type. /// /// Given /// \code /// _Atomic(int) i; /// _Atomic(float) f; /// \endcode /// atomicType(hasValueType(isInteger())) /// matches "_Atomic(int) i" /// /// Usable as: Matcher<AtomicType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue, AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType)); /// Matches types nodes representing C++11 auto types. /// /// Given: /// \code /// auto n = 4; /// int v[] = { 2, 3 } /// for (auto i : v) { } /// \endcode /// autoType() /// matches "auto n" and "auto i" extern const AstTypeMatcher<AutoType> autoType; /// Matches types nodes representing C++11 decltype(<expr>) types. /// /// Given: /// \code /// short i = 1; /// int j = 42; /// decltype(i + j) result = i + j; /// \endcode /// decltypeType() /// matches "decltype(i + j)" extern const AstTypeMatcher<DecltypeType> decltypeType; /// Matches \c AutoType nodes where the deduced type is a specific type. /// /// Note: There is no \c TypeLoc for the deduced type and thus no /// \c getDeducedLoc() matcher. /// /// Given /// \code /// auto a = 1; /// auto b = 2.0; /// \endcode /// autoType(hasDeducedType(isInteger())) /// matches "auto a" /// /// Usable as: Matcher<AutoType> AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType, AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType)); /// Matches \c DecltypeType nodes to find out the underlying type. 
/// /// Given /// \code /// decltype(1) a = 1; /// decltype(2.0) b = 2.0; /// \endcode /// decltypeType(hasUnderlyingType(isInteger())) /// matches the type of "a" /// /// Usable as: Matcher<DecltypeType> AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType, AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType)); /// Matches \c FunctionType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionType() /// matches "int (*f)(int)" and the type of "g". extern const AstTypeMatcher<FunctionType> functionType; /// Matches \c FunctionProtoType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionProtoType() /// matches "int (*f)(int)" and the type of "g" in C++ mode. /// In C mode, "g" is not matched because it does not contain a prototype. extern const AstTypeMatcher<FunctionProtoType> functionProtoType; /// Matches \c ParenType nodes. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int *array_of_ptrs[4]; /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not /// \c array_of_ptrs. extern const AstTypeMatcher<ParenType> parenType; /// Matches \c ParenType nodes where the inner type is a specific type. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int (*ptr_to_func)(int); /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches /// \c ptr_to_func but not \c ptr_to_array. /// /// Usable as: Matcher<ParenType> AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType, AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType)); /// Matches block pointer types, i.e. types syntactically represented as /// "void (^)(int)". /// /// The \c pointee is always required to be a \c FunctionType. extern const AstTypeMatcher<BlockPointerType> blockPointerType; /// Matches member pointer types. 
/// Given /// \code /// struct A { int i; } /// A::* ptr = A::i; /// \endcode /// memberPointerType() /// matches "A::* ptr" extern const AstTypeMatcher<MemberPointerType> memberPointerType; /// Matches pointer types, but does not match Objective-C object pointer /// types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int c = 5; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "int *a", but does not match "Foo *f". extern const AstTypeMatcher<PointerType> pointerType; /// Matches an Objective-C object pointer type, which is different from /// a pointer type, despite being syntactically similar. /// /// Given /// \code /// int *a; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "Foo *f", but does not match "int *a". extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType; /// Matches both lvalue and rvalue reference types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f. extern const AstTypeMatcher<ReferenceType> referenceType; /// Matches lvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is /// matched since the type is deduced as int& by reference collapsing rules. extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType; /// Matches rvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not /// matched as it is deduced to int& by reference collapsing rules. 
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType; /// Narrows PointerType (and similar) matchers to those where the /// \c pointee matches a given matcher. /// /// Given /// \code /// int *a; /// int const *b; /// float const *f; /// \endcode /// pointerType(pointee(isConstQualified(), isInteger())) /// matches "int const *b" /// /// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>, /// Matcher<PointerType>, Matcher<ReferenceType> AST_TYPELOC_TRAVERSE_MATCHER_DECL( pointee, getPointee, AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType, PointerType, ReferenceType)); /// Matches typedef types. /// /// Given /// \code /// typedef int X; /// \endcode /// typedefType() /// matches "typedef int X" extern const AstTypeMatcher<TypedefType> typedefType; /// Matches enum types. /// /// Given /// \code /// enum C { Green }; /// enum class S { Red }; /// /// C c; /// S s; /// \endcode // /// \c enumType() matches the type of the variable declarations of both \c c and /// \c s. extern const AstTypeMatcher<EnumType> enumType; /// Matches template specialization types. /// /// Given /// \code /// template <typename T> /// class C { }; /// /// template class C<int>; // A /// C<char> var; // B /// \endcode /// /// \c templateSpecializationType() matches the type of the explicit /// instantiation in \c A and the type of the variable declaration in \c B. extern const AstTypeMatcher<TemplateSpecializationType> templateSpecializationType; /// Matches types nodes representing unary type transformations. /// /// Given: /// \code /// typedef __underlying_type(T) type; /// \endcode /// unaryTransformType() /// matches "__underlying_type(T)" extern const AstTypeMatcher<UnaryTransformType> unaryTransformType; /// Matches record types (e.g. structs, classes). 
/// /// Given /// \code /// class C {}; /// struct S {}; /// /// C c; /// S s; /// \endcode /// /// \c recordType() matches the type of the variable declarations of both \c c /// and \c s. extern const AstTypeMatcher<RecordType> recordType; /// Matches tag types (record and enum types). /// /// Given /// \code /// enum E {}; /// class C {}; /// /// E e; /// C c; /// \endcode /// /// \c tagType() matches the type of the variable declarations of both \c e /// and \c c. extern const AstTypeMatcher<TagType> tagType; /// Matches types specified with an elaborated type keyword or with a /// qualified name. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// class C {}; /// /// class C c; /// N::M::D d; /// \endcode /// /// \c elaboratedType() matches the type of the variable declarations of both /// \c c and \c d. extern const AstTypeMatcher<ElaboratedType> elaboratedType; /// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier, /// matches \c InnerMatcher if the qualifier exists. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))) /// matches the type of the variable declaration of \c d. AST_MATCHER_P(ElaboratedType, hasQualifier, internal::Matcher<NestedNameSpecifier>, InnerMatcher) { if (const NestedNameSpecifier *Qualifier = Node.getQualifier()) return InnerMatcher.matches(*Qualifier, Finder, Builder); return false; } /// Matches ElaboratedTypes whose named type matches \c InnerMatcher. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(namesType(recordType( /// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable /// declaration of \c d. 
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getNamedType(), Finder, Builder); } /// Matches types that represent the result of substituting a type for a /// template type parameter. /// /// Given /// \code /// template <typename T> /// void F(T t) { /// int i = 1 + t; /// } /// \endcode /// /// \c substTemplateTypeParmType() matches the type of 't' but not '1' extern const AstTypeMatcher<SubstTemplateTypeParmType> substTemplateTypeParmType; /// Matches template type parameter substitutions that have a replacement /// type that matches the provided matcher. /// /// Given /// \code /// template <typename T> /// double F(T t); /// int i; /// double j = F(i); /// \endcode /// /// \c substTemplateTypeParmType(hasReplacementType(type())) matches int AST_TYPE_TRAVERSE_MATCHER( hasReplacementType, getReplacementType, AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType)); /// Matches template type parameter types. /// /// Example matches T, but not int. /// (matcher = templateTypeParmType()) /// \code /// template <typename T> void f(int i); /// \endcode extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType; /// Matches injected class name types. /// /// Example matches S s, but not S<T> s. /// (matcher = parmVarDecl(hasType(injectedClassNameType()))) /// \code /// template <typename T> struct S { /// void f(S s); /// void g(S<T> s); /// }; /// \endcode extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType; /// Matches decayed type /// Example matches i[] in declaration of f. /// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType()))))) /// Example matches i[1]. 
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType()))))) /// \code /// void f(int i[]) { /// i[1] = 0; /// } /// \endcode extern const AstTypeMatcher<DecayedType> decayedType; /// Matches the decayed type, whos decayed type matches \c InnerMatcher AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>, InnerType) { return InnerType.matches(Node.getDecayedType(), Finder, Builder); } /// Matches declarations whose declaration context, interpreted as a /// Decl, matches \c InnerMatcher. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// \endcode /// /// \c cxxRcordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the /// declaration of \c class \c D. AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) { const DeclContext *DC = Node.getDeclContext(); if (!DC) return false; return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder); } /// Matches nested name specifiers. /// /// Given /// \code /// namespace ns { /// struct A { static void f(); }; /// void A::f() {} /// void g() { A::f(); } /// } /// ns::A a; /// \endcode /// nestedNameSpecifier() /// matches "ns::" and both "A::" extern const internal::VariadicAllOfMatcher<NestedNameSpecifier> nestedNameSpecifier; /// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc. extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc> nestedNameSpecifierLoc; /// Matches \c NestedNameSpecifierLocs for which the given inner /// NestedNameSpecifier-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD( internal::BindableMatcher<NestedNameSpecifierLoc>, loc, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) { return internal::BindableMatcher<NestedNameSpecifierLoc>( new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>( InnerMatcher)); } /// Matches nested name specifiers that specify a type matching the /// given \c QualType matcher without qualifiers. 
/// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(specifiesType( /// hasDeclaration(cxxRecordDecl(hasName("A"))) /// )) /// matches "A::" AST_MATCHER_P(NestedNameSpecifier, specifiesType, internal::Matcher<QualType>, InnerMatcher) { if (!Node.getAsType()) return false; return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder); } /// Matches nested name specifier locs that specify a type matching the /// given \c TypeLoc. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type( /// hasDeclaration(cxxRecordDecl(hasName("A"))))))) /// matches "A::" AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc, internal::Matcher<TypeLoc>, InnerMatcher) { return Node && Node.getNestedNameSpecifier()->getAsType() && InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifier. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 0) { const NestedNameSpecifier *NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(*NextNode, Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifierLoc. 
/// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A"))))) /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix, internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher, 1) { NestedNameSpecifierLoc NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(NextNode, Finder, Builder); } /// Matches nested name specifiers that specify a namespace matching the /// given namespace matcher. /// /// Given /// \code /// namespace ns { struct A {}; } /// ns::A a; /// \endcode /// nestedNameSpecifier(specifiesNamespace(hasName("ns"))) /// matches "ns::" AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace, internal::Matcher<NamespaceDecl>, InnerMatcher) { if (!Node.getAsNamespace()) return false; return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder); } /// Overloads for the \c equalsNode matcher. /// FIXME: Implement for other node types. /// @{ /// Matches if a node equals another node. /// /// \c Decl has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Stmt has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Type has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) { return &Node == Other; } /// @} /// Matches each case or default statement belonging to the given switch /// statement. This matcher may produce multiple matches. 
/// /// Given /// \code /// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } } /// \endcode /// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s") /// matches four times, with "c" binding each of "case 1:", "case 2:", /// "case 3:" and "case 4:", and "s" respectively binding "switch (1)", /// "switch (1)", "switch (2)" and "switch (2)". AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>, InnerMatcher) { BoundNodesTreeBuilder Result; // FIXME: getSwitchCaseList() does not necessarily guarantee a stable // iteration order. We should use the more general iterating matchers once // they are capable of expressing this matcher (for example, it should ignore // case statements belonging to nested switch statements). bool Matched = false; for (const SwitchCase *SC = Node.getSwitchCaseList(); SC; SC = SC->getNextSwitchCase()) { BoundNodesTreeBuilder CaseBuilder(*Builder); bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder); if (CaseMatched) { Matched = true; Result.addMatch(CaseBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches each constructor initializer in a constructor definition. /// /// Given /// \code /// class A { A() : i(42), j(42) {} int i; int j; }; /// \endcode /// cxxConstructorDecl(forEachConstructorInitializer( /// forField(decl().bind("x")) /// )) /// will trigger two matches, binding for 'i' and 'j' respectively. AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *I : Node.inits()) { BoundNodesTreeBuilder InitBuilder(*Builder); if (InnerMatcher.matches(*I, Finder, &InitBuilder)) { Matched = true; Result.addMatch(InitBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches constructor declarations that are copy constructors. 
/// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3. AST_MATCHER(CXXConstructorDecl, isCopyConstructor) { return Node.isCopyConstructor(); } /// Matches constructor declarations that are move constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2. AST_MATCHER(CXXConstructorDecl, isMoveConstructor) { return Node.isMoveConstructor(); } /// Matches constructor declarations that are default constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3. AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) { return Node.isDefaultConstructor(); } /// Matches constructors that delegate to another constructor. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(int) {} // #2 /// S(S &&) : S() {} // #3 /// }; /// S::S() : S(0) {} // #4 /// \endcode /// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not /// #1 or #2. AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) { return Node.isDelegatingConstructor(); } /// Matches constructor, conversion function, and deduction guide declarations /// that have an explicit specifier if this explicit specifier is resolved to /// true. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9. 
/// cxxConversionDecl(isExplicit()) will match #4, but not #3. /// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5. AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES( CXXConstructorDecl, CXXConversionDecl, CXXDeductionGuideDecl)) { return Node.isExplicit(); } /// Matches the expression in an explicit specifier if present in the given /// declaration. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2. /// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4. /// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6. AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>, InnerMatcher) { ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node); if (!ES.getExpr()) return false; return InnerMatcher.matches(*ES.getExpr(), Finder, Builder); } /// Matches function and namespace declarations that are marked with /// the inline keyword. /// /// Given /// \code /// inline void f(); /// void g(); /// namespace n { /// inline namespace m {} /// } /// \endcode /// functionDecl(isInline()) will match ::f(). /// namespaceDecl(isInline()) will match n::m. AST_POLYMORPHIC_MATCHER(isInline, AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl, FunctionDecl)) { // This is required because the spelling of the function used to determine // whether inline is specified or not differs between the polymorphic types. 
if (const auto *FD = dyn_cast<FunctionDecl>(&Node)) return FD->isInlineSpecified(); else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node)) return NSD->isInline(); llvm_unreachable("Not a valid polymorphic type"); } /// Matches anonymous namespace declarations. /// /// Given /// \code /// namespace n { /// namespace {} // #1 /// } /// \endcode /// namespaceDecl(isAnonymous()) will match #1 but not ::n. AST_MATCHER(NamespaceDecl, isAnonymous) { return Node.isAnonymousNamespace(); } /// Matches declarations in the namespace `std`, but not in nested namespaces. /// /// Given /// \code /// class vector {}; /// namespace foo { /// class vector {}; /// namespace std { /// class vector {}; /// } /// } /// namespace std { /// inline namespace __1 { /// class vector {}; // #1 /// namespace experimental { /// class vector {}; /// } /// } /// } /// \endcode /// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1. AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); } /// If the given case statement does not use the GNU case range /// extension, matches the constant given in the statement. /// /// Given /// \code /// switch (1) { case 1: case 1+1: case 3 ... 4: ; } /// \endcode /// caseStmt(hasCaseConstant(integerLiteral())) /// matches "case 1:" AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>, InnerMatcher) { if (Node.getRHS()) return false; return InnerMatcher.matches(*Node.getLHS(), Finder, Builder); } /// Matches declaration that has a given attribute. /// /// Given /// \code /// __attribute__((device)) void f() { ... } /// \endcode /// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of /// f. If the matcher is used from clang-query, attr::Kind parameter should be /// passed as a quoted string. e.g., hasAttr("attr::CUDADevice"). 
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) { for (const auto *Attr : Node.attrs()) { if (Attr->getKind() == AttrKind) return true; } return false; } /// Matches the return value expression of a return statement /// /// Given /// \code /// return a + b; /// \endcode /// hasReturnValue(binaryOperator()) /// matches 'return a + b' /// with binaryOperator() /// matching 'a + b' AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>, InnerMatcher) { if (const auto *RetValue = Node.getRetValue()) return InnerMatcher.matches(*RetValue, Finder, Builder); return false; } /// Matches CUDA kernel call expression. /// /// Example matches, /// \code /// kernel<<<i,j>>>(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr> cudaKernelCallExpr; /// Matches expressions that resolve to a null pointer constant, such as /// GNU's __null, C++11's nullptr, or C's NULL macro. /// /// Given: /// \code /// void *v1 = NULL; /// void *v2 = nullptr; /// void *v3 = __null; // GNU extension /// char *cp = (char *)0; /// int *ip = 0; /// int i = 0; /// \endcode /// expr(nullPointerConstant()) /// matches the initializer for v1, v2, v3, cp, and ip. Does not match the /// initializer for i. 
AST_MATCHER_FUNCTION(internal::Matcher<Expr>, nullPointerConstant) { return anyOf( gnuNullExpr(), cxxNullPtrLiteralExpr(), integerLiteral(equals(0), hasParent(expr(hasType(pointerType()))))); } /// Matches declaration of the function the statement belongs to /// /// Given: /// \code /// F& operator=(const F& o) { /// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; }); /// return *this; /// } /// \endcode /// returnStmt(forFunction(hasName("operator="))) /// matches 'return *this' /// but does not match 'return v > 0' AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>, InnerMatcher) { const auto &Parents = Finder->getASTContext().getParents(Node); llvm::SmallVector<ast_type_traits::DynTypedNode, 8> Stack(Parents.begin(), Parents.end()); while(!Stack.empty()) { const auto &CurNode = Stack.back(); Stack.pop_back(); if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) { if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) { return true; } } else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) { if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder, Builder)) { return true; } } else { for(const auto &Parent: Finder->getASTContext().getParents(CurNode)) Stack.push_back(Parent); } } return false; } /// Matches a declaration that has external formal linkage. /// /// Example matches only z (matcher = varDecl(hasExternalFormalLinkage())) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode /// /// Example matches f() because it has external formal linkage despite being /// unique to the translation unit as though it has internal likage /// (matcher = functionDecl(hasExternalFormalLinkage())) /// /// \code /// namespace { /// void f() {} /// } /// \endcode AST_MATCHER(NamedDecl, hasExternalFormalLinkage) { return Node.hasExternalFormalLinkage(); } /// Matches a declaration that has default arguments. 
/// /// Example matches y (matcher = parmVarDecl(hasDefaultArgument())) /// \code /// void x(int val) {} /// void y(int val = 0) {} /// \endcode AST_MATCHER(ParmVarDecl, hasDefaultArgument) { return Node.hasDefaultArg(); } /// Matches array new expressions. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(isArray()) /// matches the expression 'new MyClass[10]'. AST_MATCHER(CXXNewExpr, isArray) { return Node.isArray(); } /// Matches array new expressions with a given array size. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(hasArraySize(intgerLiteral(equals(10)))) /// matches the expression 'new MyClass[10]'. AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) { return Node.isArray() && *Node.getArraySize() && InnerMatcher.matches(**Node.getArraySize(), Finder, Builder); } /// Matches a class declaration that is defined. /// /// Example matches x (matcher = cxxRecordDecl(hasDefinition())) /// \code /// class x {}; /// class y; /// \endcode AST_MATCHER(CXXRecordDecl, hasDefinition) { return Node.hasDefinition(); } /// Matches C++11 scoped enum declaration. /// /// Example matches Y (matcher = enumDecl(isScoped())) /// \code /// enum X {}; /// enum class Y {}; /// \endcode AST_MATCHER(EnumDecl, isScoped) { return Node.isScoped(); } /// Matches a function declared with a trailing return type. /// /// Example matches Y (matcher = functionDecl(hasTrailingReturn())) /// \code /// int X() {} /// auto Y() -> int {} /// \endcode AST_MATCHER(FunctionDecl, hasTrailingReturn) { if (const auto *F = Node.getType()->getAs<FunctionProtoType>()) return F->hasTrailingReturn(); return false; } /// Matches expressions that match InnerMatcher that are possibly wrapped in an /// elidable constructor and other corresponding bookkeeping nodes. /// /// In C++17, elidable copy constructors are no longer being generated in the /// AST as it is not permitted by the standard. 
They are, however, part of the /// AST in C++14 and earlier. So, a matcher must abstract over these differences /// to work in all language modes. This matcher skips elidable constructor-call /// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and /// various implicit nodes inside the constructor calls, all of which will not /// appear in the C++17 AST. /// /// Given /// /// \code /// struct H {}; /// H G(); /// void f() { /// H D = G(); /// } /// \endcode /// /// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))`` /// matches ``H D = G()`` in C++11 through C++17 (and beyond). AST_MATCHER_P(Expr, ignoringElidableConstructorCall, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { // E tracks the node that we are examining. const Expr *E = &Node; // If present, remove an outer `ExprWithCleanups` corresponding to the // underlying `CXXConstructExpr`. This check won't cover all cases of added // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the // EWC is placed on the outermost node of the expression, which this may not // be), but, it still improves the coverage of this matcher. if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node)) E = CleanupsExpr->getSubExpr(); if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) { if (CtorExpr->isElidable()) { if (const auto *MaterializeTemp = dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) { return InnerMatcher.matches(*MaterializeTemp->GetTemporaryExpr(), Finder, Builder); } } } return InnerMatcher.matches(Node, Finder, Builder); } //----------------------------------------------------------------------------// // OpenMP handling. //----------------------------------------------------------------------------// /// Matches any ``#pragma omp`` executable directive. 
/// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective()`` matches ``omp parallel``, /// ``omp parallel default(none)`` and ``omp taskyield``. extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective> ompExecutableDirective; /// Matches standalone OpenMP directives, /// i.e., directives that can't have a structured block. /// /// Given /// /// \code /// #pragma omp parallel /// {} /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective(isStandaloneDirective()))`` matches /// ``omp taskyield``. AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) { return Node.isStandaloneDirective(); } /// Matches the Stmt AST node that is marked as being the structured-block /// of an OpenMP executable directive. /// /// Given /// /// \code /// #pragma omp parallel /// {} /// \endcode /// /// ``stmt(isOMPStructuredBlock()))`` matches ``{}``. AST_MATCHER(Stmt, isOMPStructuredBlock) { return Node.isOMPStructuredBlock(); } /// Matches the structured-block of the OpenMP executable directive /// /// Prerequisite: the executable directive must not be standalone directive. /// If it is, it will never match. /// /// Given /// /// \code /// #pragma omp parallel /// ; /// #pragma omp parallel /// {} /// \endcode /// /// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;`` AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock, internal::Matcher<Stmt>, InnerMatcher) { if (Node.isStandaloneDirective()) return false; // Standalone directives have no structured blocks. return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder); } /// Matches any clause in an OpenMP directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// \endcode /// /// ``ompExecutableDirective(hasAnyClause(anything()))`` matches /// ``omp parallel default(none)``. 
AST_MATCHER_P(OMPExecutableDirective, hasAnyClause, internal::Matcher<OMPClause>, InnerMatcher) { ArrayRef<OMPClause *> Clauses = Node.clauses(); return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(), Clauses.end(), Finder, Builder); } /// Matches OpenMP ``default`` clause. /// /// Given /// /// \code /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel /// \endcode /// /// ``ompDefaultClause()`` matches ``default(none)`` and ``default(shared)``. extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause> ompDefaultClause; /// Matches if the OpenMP ``default`` clause has ``none`` kind specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// \endcode /// /// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``. AST_MATCHER(OMPDefaultClause, isNoneKind) { return Node.getDefaultKind() == OMPC_DEFAULT_none; } /// Matches if the OpenMP ``default`` clause has ``shared`` kind specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// \endcode /// /// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``. AST_MATCHER(OMPDefaultClause, isSharedKind) { return Node.getDefaultKind() == OMPC_DEFAULT_shared; } /// Matches if the OpenMP directive is allowed to contain the specified OpenMP /// clause kind. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel for /// #pragma omp for /// \endcode /// /// `ompExecutableDirective(isAllowedToContainClause(OMPC_default))`` matches /// ``omp parallel`` and ``omp parallel for``. /// /// If the matcher is use from clang-query, ``OpenMPClauseKind`` parameter /// should be passed as a quoted string. 
e.g., /// ``isAllowedToContainClauseKind("OMPC_default").`` AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind, OpenMPClauseKind, CKind) { return isAllowedClauseForDirective(Node.getDirectiveKind(), CKind); } //----------------------------------------------------------------------------// // End OpenMP handling. //----------------------------------------------------------------------------// } // namespace ast_matchers } // namespace clang #endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
gi_regular_grid_trilinear_function_uncached.h
/* * * Copyright (C) 2018 Attila Gyulassy <jediati@sci.utah.edu> * All rights reserved. * * This software may be modified and distributed under the terms * of the BSD license. See the LICENSE file for details. */ #ifndef REGULAR_GRID_TRILINEAR_FUNCTION_UNCACHED #define REGULAR_GRID_TRILINEAR_FUNCTION_UNCACHED #include <algorithm> #include <cmath> #include "base/gi_basic_types.h" #include "base/gi_vectors.h" #include "base/gi_regular_grid_3d.h" namespace GInt { class UncachedRegularGridTrilinearFunction { protected: RegularGrid3D * m_grid; FLOATTYPE* m_image; FLOATTYPE m_min_value; FLOATTYPE m_max_value; bool m_i_made_gradient; bool m_i_made_image; void fill_extents() { FLOATTYPE t_max_val = m_max_value = m_image[0]; FLOATTYPE t_min_val = m_min_value = m_image[0]; INDEX_TYPE num_elements = m_grid->NumElements(); INDEX_TYPE ii; #pragma omp parallel shared(num_elements) private(ii) firstprivate(t_max_val,t_min_val) { #pragma omp for nowait for (ii = 0; ii<num_elements; ++ii) { if (m_image[ii] > t_max_val) { t_max_val = m_image[ii]; } if (m_image[ii] < t_min_val) { t_min_val = m_image[ii]; } } #pragma omp critical { if (t_max_val > m_max_value) m_max_value = t_max_val; if (t_min_val < m_min_value) m_min_value = t_min_val; } } } public: FLOATTYPE GetMinValue() const { return m_min_value; } FLOATTYPE GetMaxValue() const { return m_max_value; } UncachedRegularGridTrilinearFunction(RegularGrid3D* grid, FLOATTYPE *image = 0) : m_grid(grid) { m_i_made_image = false; m_i_made_gradient = false; m_image = NULL; // use the function if it is passed, otherwise simply allocate memory if(image != 0) { m_image = image; } //m_grad = new Vec3d[m_grid->NumElements()]; } ~UncachedRegularGridTrilinearFunction() { if (m_i_made_image) delete[] m_image; } // return pointer to underlying mesh and function const RegularGrid3D* GetGrid() const { return m_grid; } FLOATTYPE* GetImage() const { return m_image; } // sample the image at integral location FLOATTYPE SampleImage(const Vec3l& p) const { 
return m_image[m_grid->Index3d(p)]; } // sample the image at integral location FLOATTYPE SampleImage(const INDEX_TYPE id) const { return m_image[id]; } static const FLOATTYPE kRKCoefficients[5][9]; Vec3d GradientFromImage(const Vec3l& p, int rklevel) const { Vec3l negs[9]; // don't support more than 4th order - cmon. would be ridiculous double res_x = 0.0; int rklevel_x = m_grid->Gather1DNeighborhood(p, 0, rklevel, negs); int nume_x = rklevel_x * 2 + 1; // number of entries to average for (int i = 0; i < nume_x; i++) { res_x += kRKCoefficients[rklevel_x][i] * SampleImage(negs[i]); } double res_y = 0.0; int rklevel_y = m_grid->Gather1DNeighborhood(p, 1, rklevel, negs); int nume_y = rklevel_y * 2 + 1; // number of entries to average for (int i = 0; i < nume_y; i++) { res_y += kRKCoefficients[rklevel_y][i] * SampleImage(negs[i]); } double res_z = 0.0; int rklevel_z = m_grid->Gather1DNeighborhood(p, 2, rklevel, negs); int nume_z = rklevel_z * 2 + 1; // number of entries to average for (int i = 0; i < nume_z; i++) { res_z += kRKCoefficients[rklevel_z][i] * SampleImage(negs[i]); } return Vec3d(res_x, res_y, res_z); } // sample the gradient at integral location const Vec3d& SampleGrad(const Vec3l& p) const { return GradientFromImage(p, 1); } FLOATTYPE TriLinInterpValue(const Vec3d& s) const { Vec3l n[8]; // for 8 vertices around s - some may be repeated based on boundary cond. 
m_grid->GatherSurrounding(s, n); Vec3d b = n[0]; //s.print_vf(); //b.print_vf(); Vec3d factors = s - b; FLOATTYPE x0 = (1 - factors[0]) * SampleImage(n[0]) + SampleImage(n[1]) * factors[0]; FLOATTYPE x1 = (1 - factors[0]) * SampleImage(n[2]) + SampleImage(n[3]) * factors[0]; FLOATTYPE x2 = (1 - factors[0]) * SampleImage(n[4]) + SampleImage(n[5]) * factors[0]; FLOATTYPE x3 = (1 - factors[0]) * SampleImage(n[6]) + SampleImage(n[7]) * factors[0]; FLOATTYPE y0 = (1 - factors[1]) *x0 + x1 * factors[1]; FLOATTYPE y1 = (1 - factors[1]) *x2 + x3 * factors[1]; return (1 - factors[2]) *y0 + y1 * factors[2]; } // return trilinearly interpolated value Vec3d TriLinInterpGrad(const Vec3d& s) const { Vec3l n[8]; // for 8 vertices around s - some may be repeated based on boundary cond. m_grid->GatherSurrounding(s, n); Vec3d b = n[0]; //s.print_vf(); //b.print_vf(); Vec3d factors = s - b; Vec3d x0 = Vec3d::Lerp(SampleGrad(n[0]), SampleGrad(n[1]), factors[0]); Vec3d x1 = Vec3d::Lerp(SampleGrad(n[2]), SampleGrad(n[3]), factors[0]); Vec3d x2 = Vec3d::Lerp(SampleGrad(n[4]), SampleGrad(n[5]), factors[0]); Vec3d x3 = Vec3d::Lerp(SampleGrad(n[6]), SampleGrad(n[7]), factors[0]); Vec3d y0 = Vec3d::Lerp(x0, x1, factors[1]); Vec3d y1 = Vec3d::Lerp(x2, x3, factors[1]); return Vec3d::Lerp(y0, y1, factors[2]); } void SetGradExplicit(INDEX_TYPE id, Vec3d vec) { //this->m_grad[id] = vec; } // fill in vals with the 8 values of hte gradient around sample poitn void GetGradSurrounding(const Vec3d& s, Vec3d* vals) const { Vec3l n[8]; // for 8 vertices around s - some may be repeated based on boundary cond. m_grid->GatherSurrounding(s, n); for (int i = 0; i < 8; i++) vals[i] = SampleGrad(n[i]); } void GetGradSurrounding(const Vec3l& s, Vec3d* vals) const { Vec3l n[8]; // for 8 vertices around s - some may be repeated based on boundary cond. 
m_grid->GatherSurrounding(s, n); for (int i = 0; i < 8; i++) vals[i] = SampleGrad(n[i]); } // use with extreme care - no boundary checks, only do on really interior poitns void GetGradSurroundingNoBoundaryCheck(const Vec3d& s, Vec3d* vals) const { Vec3l n[8]; // for 8 vertices around s - some may be repeated based on boundary cond. m_grid->GatherSurroundingNoBoundaryCheck(s, n); for (int i = 0; i < 8; i++) vals[i] = SampleGrad(n[i]); } FLOATTYPE InterpolatedValue(const Vec3d& s) const { return TriLinInterpValue(s); } Vec3d InterpolatedGrad(const Vec3d& s) const { return TriLinInterpGrad(s); } // allow reuse of sampled gradient - the assumption that vals has the gradient arrows around s Vec3d TriLinInterpGrad(const Vec3d& s, const Vec3l& int_base, Vec3d* vals) const { //if (!(s.IntFloor() == int_base)) { // printf("s="); s.PrintFloat(); printf("d="); int_base.PrintFloat(); //} // //Vec3d d = int_base.IntFloor(); Vec3d factors = s - int_base; Vec3d x0 = Vec3d::Lerp(vals[0], vals[1], factors[0]); Vec3d x1 = Vec3d::Lerp(vals[2], vals[3], factors[0]); Vec3d x2 = Vec3d::Lerp(vals[4], vals[5], factors[0]); Vec3d x3 = Vec3d::Lerp(vals[6], vals[7], factors[0]); Vec3d y0 = Vec3d::Lerp(x0, x1, factors[1]); Vec3d y1 = Vec3d::Lerp(x2, x3, factors[1]); return Vec3d::Lerp(y0, y1, factors[2]); } void LoadImageFromFloatFile(const char* fname) { size_t image_size = m_grid->NumElements(); // fill in image m_image = new FLOATTYPE[image_size]; m_i_made_image = true; FILE* fin = fopen(fname, "rb"); for (INDEX_TYPE i = 0; i < image_size; i++) { float tval = 0; fread(&tval, sizeof(float), 1, fin); m_image[i] = tval; } fclose(fin); fill_extents(); printf("min = %e, max = %e\n", this->m_min_value, this->m_max_value); } void LoadImageFromFile(const char* fname) { size_t image_size = m_grid->NumElements(); // fill in image m_image = new FLOATTYPE[image_size]; m_i_made_image = true; FILE* fin = fopen(fname, "rb"); fread(m_image, sizeof(FLOATTYPE), image_size, fin); fclose(fin); fill_extents(); 
printf("min = %e, max = %e\n", this->m_min_value, this->m_max_value); } void ShallowCopyImage(FLOATTYPE *image) { m_image = image; INDEX_TYPE image_size = m_grid->NumElements(); fill_extents(); printf("min = %e, max = %e\n", this->m_min_value, this->m_max_value); } void DeepCopyImage(const FLOATTYPE *image) { m_image = new FLOATTYPE[m_grid->NumElements()]; m_i_made_image = true; INDEX_TYPE image_size = m_grid->NumElements(); memcpy(m_image, image, image_size*sizeof(FLOATTYPE)); fill_extents(); printf("min = %e, max = %e\n", this->m_min_value, this->m_max_value); } inline bool IsGreater(INDEX_TYPE a, INDEX_TYPE b) const { if (m_image[a] > m_image[b]) return true; if (m_image[b] > m_image[a]) return false; //if (a == b) printf("WHOA THERE NELLY\n"); return a > b; } //Vec3d IStep(const Vec3d& p, const Vec3d& grad, const FLOATTYPE h) const { // return m_grid->Inbounds(p + (grad * h)); //} //Vec3d IStepNoBoundaryCheck(const Vec3d& p, const Vec3d& grad, const FLOATTYPE h) const { // return p + (grad * h); //} // add in block structure void ComputeGradFromImage(int rklevel) { } void Negate() { #pragma omp parallel for schedule(static) for (INDEX_TYPE i = 0; i < m_grid->NumElements(); i++) { this->m_image[i] *= -1; } } }; }; #endif
ellipticSEMFEMSetup.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/
#include "elliptic.h"

// Vertex record used when sorting FEM vertices by physical (VX,VY) location.
typedef struct{
  dfloat VX;
  dfloat VY;
  dlong localId;
  hlong globalId;
}FEMverts_t;

// Degree-of-freedom record carrying the MPI rank that owns it.
typedef struct {
  dlong localId;
  hlong globalId;
  int ownerRank;
}parallelNode_t;

// compare on global owners
int parallelCompareOwnersAndGlobalId(const void *a, const void *b);

// compare on global indices
int parallelCompareGlobalId(const void *a, const void *b);

// compare xy coordinates
// qsort comparator: lexicographic (VX,VY) with tolerance NODETOL so nearly
// coincident vertices compare equal.
int parallelCompareFEMvertsLocation(const void *a, const void *b){
  dfloat NODETOL = 1e-6;

  FEMverts_t *fa = (FEMverts_t*) a;
  FEMverts_t *fb = (FEMverts_t*) b;

  if(fa->VX < fb->VX - NODETOL) return -1;
  if(fa->VX > fb->VX + NODETOL) return +1;

  if(fa->VY < fb->VY - NODETOL) return -1;
  if(fa->VY > fb->VY + NODETOL) return +1;

  return 0;
}

// compare local id
// qsort comparator on the localId field only.
int parallelCompareFEMvertsLocalId(const void *a, const void *b){
  FEMverts_t *fa = (FEMverts_t*) a;
  FEMverts_t *fb = (FEMverts_t*) b;

  if(fa->localId < fb->localId) return -1;
  if(fa->localId > fb->localId) return +1;

  return 0;
}

int parallelCompareRowColumn(const void *a, const void *b);

// Per-element-type assembly of unassembled stiffness-matrix non-zeros
// (defined below); cnt is advanced as entries are appended to A.
void BuildFEMMatrixTri2D (mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,int *globalOwners,dlong *cnt, nonZero_t *A);
void BuildFEMMatrixQuad2D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,int *globalOwners,dlong *cnt, nonZero_t *A);
void BuildFEMMatrixTet3D (mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,int *globalOwners,dlong *cnt, nonZero_t *A);
void BuildFEMMatrixHex3D (mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,int *globalOwners,dlong *cnt, nonZero_t *A);

// Build the SEMFEM preconditioner: clone the SEM mesh onto the FEM node set,
// build a degree-1 FEM mesh from the sub-element decomposition, assemble the
// global FEM stiffness matrix (+ lambda mass term) across ranks, and hand the
// assembled COO matrix to parAlmond's AMG setup. Only CONTINUOUS
// discretizations are supported.
void ellipticSEMFEMSetup(elliptic_t *elliptic, precon_t* precon, dfloat lambda) {

  setupAide options = elliptic->options;

  if (!(options.compareArgs("DISCRETIZATION", "CONTINUOUS"))) {
    printf("SEMFEM is supported for CONTINUOUS only\n");
    MPI_Barrier(elliptic->mesh->comm);
    MPI_Finalize();
    exit(0);
  }

  mesh_t* mesh = elliptic->mesh; //original mesh

  //partially assembled fem mesh (result of projecting sem element to larger space)
  mesh_t* pmesh = (mesh_t*) calloc (1,sizeof(mesh_t));

  //full fem mesh
  precon->femMesh = (mesh_t*) calloc (1,sizeof(mesh_t));
  mesh_t *femMesh = precon->femMesh;

  // shallow copies: pmesh/femMesh share pointer fields with mesh until
  // overwritten below
  memcpy(pmesh ,mesh,sizeof(mesh_t));
  memcpy(femMesh,mesh,sizeof(mesh_t));

  if (elliptic->elementType==TRIANGLES) {
    //set semfem nodes as the grid points
    pmesh->Np = mesh->NpFEM;
    pmesh->r  = mesh->rFEM;
    pmesh->s  = mesh->sFEM;

    //count number of face nodes in the semfem element
    dfloat NODETOL = 1e-6;
    pmesh->Nfp=0;
    for (int n=0;n<pmesh->Np;n++)
      if (fabs(pmesh->s[n]+1)<NODETOL) pmesh->Nfp++;

    //remake the faceNodes array
    pmesh->faceNodes = (int *) calloc(pmesh->Nfaces*pmesh->Nfp,sizeof(int));
    int f0=0, f1=0, f2=0;
    for (int n=0;n<pmesh->Np;n++) {
      if (fabs(pmesh->s[n]+1)<NODETOL)           pmesh->faceNodes[0*pmesh->Nfp+f0++] = n;
      if (fabs(pmesh->r[n]+pmesh->s[n])<NODETOL) pmesh->faceNodes[1*pmesh->Nfp+f1++] = n;
      if (fabs(pmesh->r[n]+1)<NODETOL)           pmesh->faceNodes[2*pmesh->Nfp+f2++] = n;
    }

    //remake vertexNodes array
    pmesh->vertexNodes = (int*) calloc(pmesh->Nverts, sizeof(int));
    for(int n=0;n<pmesh->Np;++n){
      if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)<NODETOL)
        pmesh->vertexNodes[0] = n;
      if( (pmesh->r[n]-1)*(pmesh->r[n]-1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)<NODETOL)
        pmesh->vertexNodes[1] = n;
      if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]-1)*(pmesh->s[n]-1)<NODETOL)
        pmesh->vertexNodes[2] = n;
    }

    // connect elements using parallel sort
    meshParallelConnect(pmesh);

    // compute physical (x,y) locations of the element nodes
    meshPhysicalNodesTri2D(pmesh);

    // free(sendBuffer);
    meshHaloSetup(pmesh);

    // connect face nodes (find trace indices)
    meshConnectFaceNodes2D(pmesh);

    // global nodes
    meshParallelConnectNodes(pmesh);
    //pmesh->globalIds is now populated

  } else if (elliptic->elementType==TETRAHEDRA) {
    //set semfem nodes as the grid points
    pmesh->Np = mesh->NpFEM;
    pmesh->r  = mesh->rFEM;
    pmesh->s  = mesh->sFEM;
    pmesh->t  = mesh->tFEM;

    //count number of face nodes in the semfem element
    dfloat NODETOL = 1e-6;
    pmesh->Nfp=0;
    for (int n=0;n<pmesh->Np;n++)
      if (fabs(pmesh->t[n]+1)<NODETOL) pmesh->Nfp++;

    //remake the faceNodes array
    pmesh->faceNodes = (int *) calloc(pmesh->Nfaces*pmesh->Nfp,sizeof(int));
    int f0=0, f1=0, f2=0, f3=0;
    for (int n=0;n<pmesh->Np;n++) {
      if (fabs(pmesh->t[n]+1)<NODETOL)
        pmesh->faceNodes[0*pmesh->Nfp+f0++] = n;
      if (fabs(pmesh->s[n]+1)<NODETOL)
        pmesh->faceNodes[1*pmesh->Nfp+f1++] = n;
      if (fabs(pmesh->r[n]+pmesh->s[n]+ pmesh->t[n]+1.0)<NODETOL)
        pmesh->faceNodes[2*pmesh->Nfp+f2++] = n;
      if (fabs(pmesh->r[n]+1)<NODETOL)
        pmesh->faceNodes[3*pmesh->Nfp+f3++] = n;
    }

    //remake vertexNodes array
    pmesh->vertexNodes = (int*) calloc(pmesh->Nverts, sizeof(int));
    for(int n=0;n<pmesh->Np;++n){
      if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)+(pmesh->t[n]+1)*(pmesh->t[n]+1)<NODETOL)
        pmesh->vertexNodes[0] = n;
      if( (pmesh->r[n]-1)*(pmesh->r[n]-1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)+(pmesh->t[n]+1)*(pmesh->t[n]+1)<NODETOL)
        pmesh->vertexNodes[1] = n;
      if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]-1)*(pmesh->s[n]-1)+(pmesh->t[n]+1)*(pmesh->t[n]+1)<NODETOL)
        pmesh->vertexNodes[2] = n;
      if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)+(pmesh->t[n]-1)*(pmesh->t[n]-1)<NODETOL)
        pmesh->vertexNodes[3] = n;
    }

    // connect elements using parallel sort
    meshParallelConnect(pmesh);

    // compute physical (x,y) locations of the element nodes
    meshPhysicalNodesTet3D(pmesh);

    // free(sendBuffer);
    meshHaloSetup(pmesh);

    // connect face nodes (find trace indices)
    meshConnectFaceNodes3D(pmesh);

    // global nodes
    meshParallelConnectNodes(pmesh);
    //pmesh->globalIds is now populated
  }

  //now build the full degree 1 fem mesh
  int femN = 1; //degree of fem approximation

  /* allocate space for node coordinates */
  femMesh->Nelements = mesh->NelFEM*mesh->Nelements;
  femMesh->EToV = (hlong*) calloc(femMesh->Nelements*femMesh->Nverts, sizeof(hlong));
  femMesh->EX = (dfloat*) calloc(femMesh->Nverts*femMesh->Nelements, sizeof(dfloat));
  femMesh->EY = (dfloat*) calloc(femMesh->Nverts*femMesh->Nelements, sizeof(dfloat));
  if (elliptic->dim==3)
    femMesh->EZ = (dfloat*) calloc(femMesh->Nverts*femMesh->Nelements, sizeof(dfloat));

  // map from fem-mesh vertex slots back to pmesh node ids
  dlong *localIds = (dlong *) calloc(femMesh->Nverts*femMesh->Nelements,sizeof(dlong));

  // dlong NFEMverts = mesh->Nelements*mesh->NpFEM;
  for(dlong e=0;e<mesh->Nelements;++e){
    for (int n=0;n<mesh->NelFEM;n++) {
      dlong id[femMesh->Nverts]; // NOTE: VLA sized by runtime Nverts
      dlong femId = e*mesh->NelFEM*mesh->Nverts+n*mesh->Nverts;
      for (int i=0;i<femMesh->Nverts;i++) {
        //local ids in the subelement fem grid
        id[i] = e*mesh->NpFEM + mesh->FEMEToV[n*mesh->Nverts+i];

        /* read vertex triplet for triangle */
        femMesh->EToV[femId+i] = pmesh->globalIds[id[i]];

        femMesh->EX[femId+i] = pmesh->x[id[i]];
        femMesh->EY[femId+i] = pmesh->y[id[i]];
        if (elliptic->dim==3)
          femMesh->EZ[femId+i] = pmesh->z[id[i]];
      }

      switch(elliptic->elementType){
      case TRIANGLES:
        localIds[femId+0] = id[0];
        localIds[femId+1] = id[1];
        localIds[femId+2] = id[2];
        break;
      case QUADRILATERALS:
        localIds[femId+0] = id[0];
        localIds[femId+1] = id[1];
        localIds[femId+2] = id[3]; //need to swap this as the Np nodes are ordered [0,1,3,2] in a degree 1 element
        localIds[femId+3] = id[2];
        break;
      case TETRAHEDRA:
        localIds[femId+0] = id[0];
        localIds[femId+1] = id[1];
        localIds[femId+2] = id[2];
        localIds[femId+3] = id[3];
        break;
      case HEXAHEDRA:
        localIds[femId+0] = id[0];
        localIds[femId+1] = id[1];
        localIds[femId+2] = id[3]; //need to swap this as the Np nodes are ordered [0,1,3,2,4,5,7,6] in a degree 1 element
        localIds[femId+3] = id[2];
        localIds[femId+4] = id[4];
        localIds[femId+5] = id[5];
        localIds[femId+6] = id[7];
        localIds[femId+7] = id[6];
        break;
      }
    }
  }

  // connect elements using parallel sort
  meshParallelConnect(femMesh);

  switch(elliptic->elementType){
  case TRIANGLES:
    meshLoadReferenceNodesTri2D(femMesh, femN); break;
  case QUADRILATERALS:
    meshLoadReferenceNodesQuad2D(femMesh, femN); break;
  case TETRAHEDRA:
    meshLoadReferenceNodesTet3D(femMesh, femN); break;
  case HEXAHEDRA:
    meshLoadReferenceNodesHex3D(femMesh, femN); break;
  }

  int *faceFlag = (int*) calloc(pmesh->Np*pmesh->Nfaces,sizeof(int));
  for (int f=0;f<pmesh->Nfaces;f++) {
    for (int n=0;n<pmesh->Nfp;n++) {
      int id = pmesh->faceNodes[f*pmesh->Nfp+n];
      faceFlag[f*pmesh->Np + id] = 1; //flag the nodes on this face
    }
  }

  //map from faces of fem sub-elements to the macro element face number
  int *femFaceMap = (int*) calloc(mesh->NelFEM*femMesh->Nfaces,sizeof(int));
  for (int n=0;n<mesh->NelFEM*femMesh->Nfaces;n++) femFaceMap[n] = -1;

  for (int n=0;n<mesh->NelFEM;n++) {
    for (int f=0;f<femMesh->Nfaces;f++) {
      for (int face=0; face<pmesh->Nfaces;face++) {

        //count the nodes on this face which are on a macro face
        int NvertsOnFace = 0;
        for (int i=0;i<femMesh->Nfp;i++){
          int id = femMesh->faceNodes[f*femMesh->Nfp+i];
          int v  = mesh->FEMEToV[n*pmesh->Nverts+id];
          NvertsOnFace += faceFlag[face*pmesh->Np + v];
        }
        if (NvertsOnFace == femMesh->Nfp)
          femFaceMap[n*femMesh->Nfaces+f] = face; //on macro face
      }
    }
  }

  //fill the boundary flag array
  femMesh->EToB = (int*) calloc(femMesh->Nelements*femMesh->Nfaces, sizeof(int));
  for (dlong e=0;e<mesh->Nelements;e++) {
    for (int n=0;n<mesh->NelFEM;n++) {
      for (int f=0;f<femMesh->Nfaces;f++) {
        int face = femFaceMap[n*femMesh->Nfaces+f];
        if (face>-1) {
          // sub-element face inherits the macro element's boundary flag
          femMesh->EToB[(e*mesh->NelFEM +n)*femMesh->Nfaces +f] = mesh->EToB[e*mesh->Nfaces + face];
        }
      }
    }
  }
  free(faceFlag);
  free(femFaceMap);

  // finish the fem mesh: physical nodes, geometric factors, halo, trace maps
  switch(elliptic->elementType){
  case TRIANGLES:
    meshPhysicalNodesTri2D(femMesh);
    meshGeometricFactorsTri2D(femMesh);
    meshHaloSetup(femMesh);
    meshConnectFaceNodes2D(femMesh);
    meshSurfaceGeometricFactorsTri2D(femMesh);
    break;
  case QUADRILATERALS:
    meshPhysicalNodesQuad2D(femMesh);
    meshGeometricFactorsQuad2D(femMesh);
    meshHaloSetup(femMesh);
    meshConnectFaceNodes2D(femMesh);
    meshSurfaceGeometricFactorsQuad2D(femMesh);
    break;
  case TETRAHEDRA:
    meshPhysicalNodesTet3D(femMesh);
    meshGeometricFactorsTet3D(femMesh);
    meshHaloSetup(femMesh);
    meshConnectFaceNodes3D(femMesh);
    meshSurfaceGeometricFactorsTet3D(femMesh);
    break;
  case HEXAHEDRA:
    meshPhysicalNodesHex3D(femMesh);
    meshGeometricFactorsHex3D(femMesh);
    meshHaloSetup(femMesh);
    meshConnectFaceNodes3D(femMesh);
    meshSurfaceGeometricFactorsHex3D(femMesh);
    break;
  }

  // global nodes
  meshParallelConnectNodes(femMesh);

  dlong Ntotal = pmesh->Np*pmesh->Nelements;
  int verbose = options.compareArgs("VERBOSE","TRUE") ? 1:0;

  // masked copy of the global ids: Dirichlet nodes get id 0 so the gather
  // drops them from the assembled system
  pmesh->maskedGlobalIds = (hlong *) calloc(Ntotal,sizeof(hlong));
  memcpy(pmesh->maskedGlobalIds, pmesh->globalIds, Ntotal*sizeof(hlong));
  if (elliptic->elementType==TRIANGLES||elliptic->elementType==TETRAHEDRA) {
    //build a new mask for NpFEM>Np node sets
    // gather-scatter
    pmesh->ogs = ogsSetup(Ntotal, pmesh->globalIds, mesh->comm, verbose, mesh->device);

    //make a node-wise bc flag using the gsop (prioritize Dirichlet boundaries over Neumann)
    int *mapB = (int *) calloc(Ntotal,sizeof(int));
    for (dlong e=0;e<pmesh->Nelements;e++) {
      for (int n=0;n<pmesh->Np;n++) mapB[n+e*pmesh->Np] = 1E9;
      for (int f=0;f<pmesh->Nfaces;f++) {
        int bc = pmesh->EToB[f+e*pmesh->Nfaces];
        if (bc>0) {
          for (int n=0;n<pmesh->Nfp;n++) {
            int BCFlag = elliptic->BCType[bc];
            int fid = pmesh->faceNodes[n+f*pmesh->Nfp];
            mapB[fid+e*pmesh->Np] = mymin(BCFlag,mapB[fid+e*pmesh->Np]);
          }
        }
      }
    }
    ogsGatherScatter(mapB, ogsInt, ogsMin, pmesh->ogs);

    //use the bc flags to find masked ids
    for (dlong n=0;n<pmesh->Nelements*pmesh->Np;n++) {
      if (mapB[n] == 1) { //Dirichlet boundary
        pmesh->maskedGlobalIds[n] = 0;
      }
    }
    free(mapB);
  } else {
    //mask using the original mask
    for (dlong n=0;n<elliptic->Nmasked;n++)
      pmesh->maskedGlobalIds[elliptic->maskIds[n]] = 0;
  }

  //build masked gs handle
  precon->FEMogs = ogsSetup(Ntotal, pmesh->maskedGlobalIds, mesh->comm, verbose, mesh->device);

  // number of degrees of freedom on this rank (after gathering)
  hlong Ngather = precon->FEMogs->Ngather;

  // create a global numbering system
  hlong *globalIds = (hlong *) calloc(Ngather,sizeof(hlong));
  int   *owner     = (int *) calloc(Ngather,sizeof(int));

  // every gathered degree of freedom has its own global id
  hlong *globalStarts = (hlong *) calloc(mesh->size+1,sizeof(hlong));
  MPI_Allgather(&Ngather, 1, MPI_HLONG, globalStarts+1, 1, MPI_HLONG, mesh->comm);
  // prefix sum -> rank r owns ids [globalStarts[r], globalStarts[r+1])
  for(int r=0;r<mesh->size;++r)
    globalStarts[r+1] = globalStarts[r]+globalStarts[r+1];

  //use the offsets to set a consecutive global numbering
  for (dlong n =0;n<precon->FEMogs->Ngather;n++) {
    globalIds[n] = n + globalStarts[mesh->rank];
    owner[n] = mesh->rank;
  }

  //scatter this numbering to the original nodes
  hlong *globalNumbering = (hlong *) calloc(Ntotal,sizeof(hlong));
  int *globalOwners = (int *) calloc(Ntotal,sizeof(int));
  for (dlong n=0;n<Ntotal;n++) globalNumbering[n] = -1; // -1 marks masked nodes
  ogsScatter(globalNumbering, globalIds, ogsHlong, ogsAdd, precon->FEMogs);
  ogsScatter(globalOwners, owner, ogsInt, ogsAdd, precon->FEMogs);

  free(globalIds); free(owner);

  if (elliptic->elementType==TRIANGLES||elliptic->elementType==TETRAHEDRA) {
    //dont need these anymore
    free(pmesh->vmapM);
    free(pmesh->vmapP);
    free(pmesh->mapP);
    //maybe more cleanup can go here
  }

  if (elliptic->elementType==TRIANGLES) {
    //build stiffness matrices
    // S.. = D.^T * MM * D. (reference-element stiffness blocks)
    femMesh->Srr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    femMesh->Srs = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    femMesh->Ssr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    femMesh->Sss = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    for (int n=0;n<femMesh->Np;n++) {
      for (int m=0;m<femMesh->Np;m++) {
        for (int k=0;k<femMesh->Np;k++) {
          for (int l=0;l<femMesh->Np;l++) {
            femMesh->Srr[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
            femMesh->Srs[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
            femMesh->Ssr[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
            femMesh->Sss[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
          }
        }
      }
    }
  } else if (elliptic->elementType==TETRAHEDRA) {
    //build stiffness matrices
    femMesh->Srr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    femMesh->Srs = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    femMesh->Srt = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    femMesh->Ssr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    femMesh->Sss = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    femMesh->Sst = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    femMesh->Str = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    femMesh->Sts = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    femMesh->Stt = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    for (int n=0;n<femMesh->Np;n++) {
      for (int m=0;m<femMesh->Np;m++) {
        for (int k=0;k<femMesh->Np;k++) {
          for (int l=0;l<femMesh->Np;l++) {
            femMesh->Srr[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
            femMesh->Srs[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
            femMesh->Srt[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dt[m+k*femMesh->Np];
            femMesh->Ssr[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
            femMesh->Sss[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
            femMesh->Sst[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dt[m+k*femMesh->Np];
            femMesh->Str[m+n*femMesh->Np] += femMesh->Dt[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
            femMesh->Sts[m+n*femMesh->Np] += femMesh->Dt[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
            femMesh->Stt[m+n*femMesh->Np] += femMesh->Dt[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dt[m+k*femMesh->Np];
          }
        }
      }
    }
  }

  if (mesh->rank==0) printf("Building full SEMFEM matrix...");fflush(stdout);

  // Build non-zeros of stiffness matrix (unassembled)
  dlong nnzLocal = femMesh->Np*femMesh->Np*femMesh->Nelements;

  dlong cnt =0;
  nonZero_t *sendNonZeros = (nonZero_t*) calloc(nnzLocal, sizeof(nonZero_t));
  int *AsendCounts  = (int*) calloc(mesh->size, sizeof(int));
  int *ArecvCounts  = (int*) calloc(mesh->size, sizeof(int));
  int *AsendOffsets = (int*) calloc(mesh->size+1, sizeof(int));
  int *ArecvOffsets = (int*) calloc(mesh->size+1, sizeof(int));

  //Build unassembed non-zeros
  switch(elliptic->elementType){
  case TRIANGLES:
    BuildFEMMatrixTri2D(femMesh,pmesh,lambda, localIds, globalNumbering, globalOwners,&cnt,sendNonZeros); break;
  case QUADRILATERALS:
    BuildFEMMatrixQuad2D(femMesh,pmesh,lambda, localIds, globalNumbering, globalOwners,&cnt,sendNonZeros); break;
  case TETRAHEDRA:
    BuildFEMMatrixTet3D(femMesh,pmesh,lambda, localIds, globalNumbering, globalOwners,&cnt,sendNonZeros); break;
  case HEXAHEDRA:
    BuildFEMMatrixHex3D(femMesh,pmesh,lambda, localIds, globalNumbering, globalOwners,&cnt,sendNonZeros); break;
  }

  // Make the MPI_NONZERO_T data type
  MPI_Datatype MPI_NONZERO_T;
  MPI_Datatype dtype[4] = {MPI_HLONG, MPI_HLONG, MPI_INT, MPI_DFLOAT};
  int blength[4] = {1, 1, 1, 1};
  MPI_Aint addr[4], displ[4];
  MPI_Get_address ( &(sendNonZeros[0]          ), addr+0);
  MPI_Get_address ( &(sendNonZeros[0].col      ), addr+1);
  MPI_Get_address ( &(sendNonZeros[0].ownerRank), addr+2);
  MPI_Get_address ( &(sendNonZeros[0].val      ), addr+3);
  displ[0] = 0;
  displ[1] = addr[1] - addr[0];
  displ[2] = addr[2] - addr[0];
  displ[3] = addr[3] - addr[0];
  MPI_Type_create_struct (4, blength, displ, dtype, &MPI_NONZERO_T);
  MPI_Type_commit (&MPI_NONZERO_T);

  // count how many non-zeros to send to each process
  for(dlong n=0;n<cnt;++n)
    AsendCounts[sendNonZeros[n].ownerRank]++;

  // sort by row ordering
  qsort(sendNonZeros, cnt, sizeof(nonZero_t), parallelCompareRowColumn);

  // find how many nodes to expect (should use sparse version)
  MPI_Alltoall(AsendCounts, 1, MPI_INT, ArecvCounts, 1, MPI_INT, mesh->comm);

  // find send and recv offsets for gather
  dlong nnz = 0;
  for(int r=0;r<mesh->size;++r){
    AsendOffsets[r+1] = AsendOffsets[r] + AsendCounts[r];
    ArecvOffsets[r+1] = ArecvOffsets[r] + ArecvCounts[r];
    nnz += ArecvCounts[r];
  }

  nonZero_t *A = (nonZero_t*) calloc(nnz, sizeof(nonZero_t));

  // determine number to receive
  MPI_Alltoallv(sendNonZeros, AsendCounts, AsendOffsets, MPI_NONZERO_T, A, ArecvCounts, ArecvOffsets, MPI_NONZERO_T, mesh->comm);

  // sort received non-zero entries by row block (may need to switch compareRowColumn tests)
  qsort(A, nnz, sizeof(nonZero_t), parallelCompareRowColumn);

  // compress duplicates (sorted, so equal (row,col) entries are adjacent)
  cnt = 0;
  for(dlong n=1;n<nnz;++n){
    if(A[n].row == A[cnt].row && A[n].col == A[cnt].col){
      A[cnt].val += A[n].val;
    } else{
      ++cnt;
      A[cnt] = A[n];
    }
  }
  if (nnz) cnt++;
  nnz = cnt;

  if(mesh->rank==0) printf("done.\n");

  MPI_Barrier(mesh->comm);
  MPI_Type_free(&MPI_NONZERO_T);

  // split the compressed COO struct array into parallel arrays for AMGSetup
  hlong *Rows = (hlong *) calloc(nnz, sizeof(hlong));
  hlong *Cols = (hlong *) calloc(nnz, sizeof(hlong));
  dfloat *Vals = (dfloat*) calloc(nnz,sizeof(dfloat));

  for (dlong n=0;n<nnz;n++) {
    Rows[n] = A[n].row;
    Cols[n] = A[n].col;
    Vals[n] = A[n].val;
  }
  free(A);

  precon->parAlmond = parAlmond::Init(mesh->device, mesh->comm, options);
  parAlmond::AMGSetup(precon->parAlmond,
                      globalStarts,
                      nnz,
                      Rows,
                      Cols,
                      Vals,
                      elliptic->allNeumann,
                      elliptic->allNeumannPenalty);
  free(Rows); free(Cols); free(Vals);

  if (options.compareArgs("VERBOSE", "TRUE"))
    parAlmond::Report(precon->parAlmond);

  if (elliptic->elementType==TRIANGLES||elliptic->elementType==TETRAHEDRA) {
    // //tell parAlmond not to gather this level (its done manually)
    // agmgLevel *baseLevel = precon->parAlmond->levels[0];
    // baseLevel->gatherLevel = false;
    // baseLevel->weightedInnerProds = false;

    // build interp and anterp
    // SEMFEMAnterp is the transpose of SEMFEMInterp
    dfloat *SEMFEMAnterp = (dfloat*) calloc(mesh->NpFEM*mesh->Np, sizeof(dfloat));
    for(int n=0;n<mesh->NpFEM;++n){
      for(int m=0;m<mesh->Np;++m){
        SEMFEMAnterp[n+m*mesh->NpFEM] = mesh->SEMFEMInterp[n*mesh->Np+m];
      }
    }

    mesh->o_SEMFEMInterp = mesh->device.malloc(mesh->NpFEM*mesh->Np*sizeof(dfloat),mesh->SEMFEMInterp);
    mesh->o_SEMFEMAnterp = mesh->device.malloc(mesh->NpFEM*mesh->Np*sizeof(dfloat),SEMFEMAnterp);
    free(SEMFEMAnterp);

    // device work buffers for the preconditioner apply
    precon->o_rFEM  = mesh->device.malloc(mesh->Nelements*mesh->NpFEM*sizeof(dfloat));
    precon->o_zFEM  = mesh->device.malloc(mesh->Nelements*mesh->NpFEM*sizeof(dfloat));
    precon->o_GrFEM = mesh->device.malloc(precon->FEMogs->Ngather*sizeof(dfloat));
    precon->o_GzFEM = mesh->device.malloc(precon->FEMogs->Ngather*sizeof(dfloat));
  } else {
    // //tell parAlmond to gather this level
    // agmgLevel *baseLevel = precon->parAlmond->levels[0];
    // baseLevel->gatherLevel = true;
    parAlmond::multigridLevel *baseLevel = precon->parAlmond->levels[0];

    precon->rhsG = (dfloat*) calloc(baseLevel->Ncols,sizeof(dfloat));
    precon->xG   = (dfloat*) calloc(baseLevel->Ncols,sizeof(dfloat));
    precon->o_rhsG = mesh->device.malloc(baseLevel->Ncols*sizeof(dfloat));
    precon->o_xG   = mesh->device.malloc(baseLevel->Ncols*sizeof(dfloat));

    // baseLevel->Srhs = (dfloat*) calloc(mesh->Np*mesh->Nelements,sizeof(dfloat));
    // baseLevel->Sx   = (dfloat*) calloc(mesh->Np*mesh->Nelements,sizeof(dfloat));
    // baseLevel->o_Srhs = mesh->device.malloc(mesh->Np*mesh->Nelements*sizeof(dfloat));
    // baseLevel->o_Sx   = mesh->device.malloc(mesh->Np*mesh->Nelements*sizeof(dfloat));

    // baseLevel->weightedInnerProds = false;

    // baseLevel->gatherArgs = (void **) calloc(3,sizeof(void*));
    // baseLevel->gatherArgs[0] = (void *) elliptic;
    // baseLevel->gatherArgs[1] = (void *) precon->FEMogs; //use the gs made from the partial gathered femgrid
    // baseLevel->gatherArgs[2] = (void *) &(baseLevel->o_Sx);
    // baseLevel->scatterArgs = baseLevel->gatherArgs;

    // baseLevel->device_gather  = ellipticGather;
    // baseLevel->device_scatter = ellipticScatter;
  }
}

// Assemble unassembled stiffness non-zeros for degree-1 triangles.
// (signature continues on the next line)
void BuildFEMMatrixTri2D(mesh_t *femMesh, mesh_t *pmesh,
dfloat lambda, dlong *localIds, hlong* globalNumbering, int *globalOwners, dlong *cnt, nonZero_t *A) {

 #pragma omp parallel for
  for (dlong e=0;e<femMesh->Nelements;e++) {

    for (int n=0;n<femMesh->Np;n++) {
      dlong idn = localIds[e*femMesh->Np + n];
      if (globalNumbering[idn]<0) continue; //skip masked nodes
      for (int m=0;m<femMesh->Np;m++) {
        dlong idm = localIds[e*femMesh->Np + m];
        if (globalNumbering[idm]<0) continue; //skip masked nodes

        dfloat val = 0.;

        // geometric factors are constant per (affine) triangle
        dfloat Grr = femMesh->ggeo[e*femMesh->Nggeo + G00ID];
        dfloat Grs = femMesh->ggeo[e*femMesh->Nggeo + G01ID];
        dfloat Gss = femMesh->ggeo[e*femMesh->Nggeo + G11ID];
        dfloat J   = femMesh->ggeo[e*femMesh->Nggeo + GWJID];

        val += Grr*femMesh->Srr[m+n*femMesh->Np];
        val += Grs*femMesh->Srs[m+n*femMesh->Np];
        val += Grs*femMesh->Ssr[m+n*femMesh->Np];
        val += Gss*femMesh->Sss[m+n*femMesh->Np];
        val += J*lambda*femMesh->MM[m+n*femMesh->Np];

        // entries below the threshold are dropped from the assembled matrix
        dfloat nonZeroThreshold = 1e-7;
        if (fabs(val)>nonZeroThreshold) {
          // critical section serializes appends to the shared (A, *cnt) output
          #pragma omp critical
          {
            // pack non-zero
            A[*cnt].val = val;
            A[*cnt].row = globalNumbering[idn];
            A[*cnt].col = globalNumbering[idm];
            A[*cnt].ownerRank = globalOwners[idn];
            (*cnt)++;
          }
        }
      }
    }
  }
}

// Assemble unassembled stiffness non-zeros for degree-1 quadrilaterals,
// exploiting the tensor-product structure of the nodal basis.
void BuildFEMMatrixQuad2D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering, int *globalOwners, dlong *cnt, nonZero_t *A) {

 #pragma omp parallel for
  for (dlong e=0;e<femMesh->Nelements;e++) {
    for (int ny=0;ny<femMesh->Nq;ny++) {
      for (int nx=0;nx<femMesh->Nq;nx++) {
        dlong idn = localIds[e*femMesh->Np + nx+ny*femMesh->Nq];
        if (globalNumbering[idn]<0) continue; //skip masked nodes
        for (int my=0;my<femMesh->Nq;my++) {
          for (int mx=0;mx<femMesh->Nq;mx++) {
            dlong idm = localIds[e*femMesh->Np + mx+my*femMesh->Nq];
            if (globalNumbering[idm]<0) continue; //skip masked nodes

            int id;
            dfloat val = 0.;

            // rr term only couples nodes in the same row
            if (ny==my) {
              for (int k=0;k<femMesh->Nq;k++) {
                id = k+ny*femMesh->Nq;
                dfloat Grr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G00ID*femMesh->Np];
                val += Grr*femMesh->D[nx+k*femMesh->Nq]*femMesh->D[mx+k*femMesh->Nq];
              }
            }

            // cross (rs / sr) terms
            id = mx+ny*femMesh->Nq;
            dfloat Grs = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
            val += Grs*femMesh->D[nx+mx*femMesh->Nq]*femMesh->D[my+ny*femMesh->Nq];

            id = nx+my*femMesh->Nq;
            dfloat Gsr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
            val += Gsr*femMesh->D[mx+nx*femMesh->Nq]*femMesh->D[ny+my*femMesh->Nq];

            // ss term only couples nodes in the same column
            if (nx==mx) {
              for (int k=0;k<femMesh->Nq;k++) {
                id = nx+k*femMesh->Nq;
                dfloat Gss = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G11ID*femMesh->Np];
                val += Gss*femMesh->D[ny+k*femMesh->Nq]*femMesh->D[my+k*femMesh->Nq];
              }
            }

            // lumped lambda mass term on the diagonal
            if ((nx==mx)&&(ny==my)) {
              id = nx + ny*femMesh->Nq;
              dfloat JW = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + GWJID*femMesh->Np];
              val += JW*lambda;
            }

            dfloat nonZeroThreshold = 1e-7;
            if (fabs(val)>nonZeroThreshold) {
              #pragma omp critical
              {
                // pack non-zero
                A[*cnt].val = val;
                A[*cnt].row = globalNumbering[idn];
                A[*cnt].col = globalNumbering[idm];
                A[*cnt].ownerRank = globalOwners[idn];
                (*cnt)++;
              }
            }
          }
        }
      }
    }
  }
}

// Assemble unassembled stiffness non-zeros for degree-1 tetrahedra.
void BuildFEMMatrixTet3D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering, int *globalOwners, dlong *cnt, nonZero_t *A) {

 #pragma omp parallel for
  for (dlong e=0;e<femMesh->Nelements;e++) {

    // geometric factors are constant per (affine) tet; hoisted out of the n/m loops
    dfloat Grr = femMesh->ggeo[e*femMesh->Nggeo + G00ID];
    dfloat Grs = femMesh->ggeo[e*femMesh->Nggeo + G01ID];
    dfloat Grt = femMesh->ggeo[e*femMesh->Nggeo + G02ID];
    dfloat Gss = femMesh->ggeo[e*femMesh->Nggeo + G11ID];
    dfloat Gst = femMesh->ggeo[e*femMesh->Nggeo + G12ID];
    dfloat Gtt = femMesh->ggeo[e*femMesh->Nggeo + G22ID];
    dfloat J   = femMesh->ggeo[e*femMesh->Nggeo + GWJID];

    for (int n=0;n<femMesh->Np;n++) {
      dlong idn = localIds[e*femMesh->Np + n];
      if (globalNumbering[idn]<0) continue; //skip masked nodes
      for (int m=0;m<femMesh->Np;m++) {
        dlong idm = localIds[e*femMesh->Np + m];
        if (globalNumbering[idm]<0) continue; //skip masked nodes

        dfloat val = 0.;
        val += Grr*femMesh->Srr[m+n*femMesh->Np];
        val += Grs*femMesh->Srs[m+n*femMesh->Np];
        val += Grt*femMesh->Srt[m+n*femMesh->Np];
        val += Grs*femMesh->Ssr[m+n*femMesh->Np];
        val += Gss*femMesh->Sss[m+n*femMesh->Np];
        val += Gst*femMesh->Sst[m+n*femMesh->Np];
        val += Grt*femMesh->Str[m+n*femMesh->Np];
        val += Gst*femMesh->Sts[m+n*femMesh->Np];
        val += Gtt*femMesh->Stt[m+n*femMesh->Np];
        val += J*lambda*femMesh->MM[m+n*femMesh->Np];

        dfloat nonZeroThreshold = 1e-7;
        if (fabs(val)>nonZeroThreshold) {
          #pragma omp critical
          {
            // pack non-zero
            A[*cnt].val = val;
            A[*cnt].row = globalNumbering[idn];
            A[*cnt].col = globalNumbering[idm];
            A[*cnt].ownerRank = globalOwners[idn];
            (*cnt)++;
          }
        }
      }
    }
  }
}

// Assemble unassembled stiffness non-zeros for degree-1 hexahedra,
// exploiting tensor-product sparsity (terms only couple along shared lines).
void BuildFEMMatrixHex3D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering, int *globalOwners, dlong *cnt, nonZero_t *A) {

 #pragma omp parallel for
  for (dlong e=0;e<femMesh->Nelements;e++) {
    for (int nz=0;nz<femMesh->Nq;nz++) {
      for (int ny=0;ny<femMesh->Nq;ny++) {
        for (int nx=0;nx<femMesh->Nq;nx++) {
          dlong nn = nx+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
          dlong idn = localIds[e*femMesh->Np + nn];
          if (globalNumbering[idn]<0) continue; //skip masked nodes

          for (int mz=0;mz<femMesh->Nq;mz++) {
            for (int my=0;my<femMesh->Nq;my++) {
              for (int mx=0;mx<femMesh->Nq;mx++) {
                dlong mm = mx+my*femMesh->Nq+mz*femMesh->Nq*femMesh->Nq;
                dlong idm = localIds[e*femMesh->Np + mm];
                if (globalNumbering[idm]<0) continue; //skip masked nodes

                int id;
                dfloat val = 0.;

                // rr: same y- and z-line
                if ((ny==my)&&(nz==mz)) {
                  for (int k=0;k<femMesh->Nq;k++) {
                    id = k+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                    dfloat Grr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G00ID*femMesh->Np];
                    val += Grr*femMesh->D[nx+k*femMesh->Nq]*femMesh->D[mx+k*femMesh->Nq];
                  }
                }

                // rs/sr cross terms: same z-plane
                if (nz==mz) {
                  id = mx+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                  dfloat Grs = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
                  val += Grs*femMesh->D[nx+mx*femMesh->Nq]*femMesh->D[my+ny*femMesh->Nq];

                  id = nx+my*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                  dfloat Gsr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
                  val += Gsr*femMesh->D[mx+nx*femMesh->Nq]*femMesh->D[ny+my*femMesh->Nq];
                }

                // rt/tr cross terms: same y-plane
                if (ny==my) {
                  id = mx+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                  dfloat Grt = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G02ID*femMesh->Np];
                  val += Grt*femMesh->D[nx+mx*femMesh->Nq]*femMesh->D[mz+nz*femMesh->Nq];

                  id = nx+ny*femMesh->Nq+mz*femMesh->Nq*femMesh->Nq;
                  dfloat Gst = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G02ID*femMesh->Np];
                  val += Gst*femMesh->D[mx+nx*femMesh->Nq]*femMesh->D[nz+mz*femMesh->Nq];
                }

                // ss: same x- and z-line
                if ((nx==mx)&&(nz==mz)) {
                  for (int k=0;k<femMesh->Nq;k++) {
                    id = nx+k*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                    dfloat Gss = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G11ID*femMesh->Np];
                    val += Gss*femMesh->D[ny+k*femMesh->Nq]*femMesh->D[my+k*femMesh->Nq];
                  }
                }

                // st/ts cross terms: same x-plane
                if (nx==mx) {
                  id = nx+my*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                  dfloat Gst = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G12ID*femMesh->Np];
                  val += Gst*femMesh->D[ny+my*femMesh->Nq]*femMesh->D[mz+nz*femMesh->Nq];

                  id = nx+ny*femMesh->Nq+mz*femMesh->Nq*femMesh->Nq;
                  dfloat Gts = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G12ID*femMesh->Np];
                  val += Gts*femMesh->D[my+ny*femMesh->Nq]*femMesh->D[nz+mz*femMesh->Nq];
                }

                // tt: same x- and y-line
                if ((nx==mx)&&(ny==my)) {
                  for (int k=0;k<femMesh->Nq;k++) {
                    id = nx+ny*femMesh->Nq+k*femMesh->Nq*femMesh->Nq;
                    dfloat Gtt = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G22ID*femMesh->Np];
                    val += Gtt*femMesh->D[nz+k*femMesh->Nq]*femMesh->D[mz+k*femMesh->Nq];
                  }
                }

                // lambda mass term on the diagonal
                if ((nx==mx)&&(ny==my)&&(nz==mz)) {
                  id = nx + ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                  dfloat JW = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + GWJID*femMesh->Np];
                  val += JW*lambda;
                }

                // pack non-zero
                // NOTE(review): uses >= here while the other routines use > — confirm intended
                dfloat nonZeroThreshold = 1e-7;
                if (fabs(val) >= nonZeroThreshold) {
                  #pragma omp critical
                  {
                    A[*cnt].val = val;
                    A[*cnt].row = globalNumbering[idn];
                    A[*cnt].col = globalNumbering[idm];
                    A[*cnt].ownerRank = globalOwners[idn];
                    (*cnt)++;
                  }
                }
              }
            }
          }
        }
      }
    }
  }
}
// otfft_misc.h
// Copyright (c) 2015, OK おじさん(岡久卓也)
// Copyright (c) 2015, OK Ojisan(Takuya OKAHISA)
// Copyright (c) 2017 to the present, DEWETRON GmbH
// OTFFT Implementation Version 9.5
// based on Stockham FFT algorithm
// from OK Ojisan(Takuya OKAHISA), source: http://www.moon.sannet.ne.jp/okahisa/stockham/stockham.html

#pragma once

#include "otfft_types.h"

namespace OTFFT_MISC {

    using namespace OTFFT;

}

//=============================================================================
//=============================================================================
// FFT Weight Initialize Routine
//=============================================================================

namespace OTFFT_MISC {

    // Fill the twiddle-factor table W for an N-point transform.
    // Exploits 8-fold symmetry of the unit circle: only N/8+1 angles are
    // evaluated; the table is parallelized with OpenMP above OMP_THRESHOLD_W.
    static inline void init_W(const int N, complex_vector W) noexcept
    {
        static const int OMP_THRESHOLD_W = 1<<16;
        const double theta0 = 2*CONSTANT::PI/N;
        const int Nh = N/2;
        const int Nq = N/4;
        const int Ne = N/8;
        const int Nd = N - Nq;
        if (N < 1) {}
        else if (N < 2) { W[0] = W[1] = 1; }
        else if (N < 4) { W[0] = W[2] = 1; W[1] = -1; }
        else if (N < 8) {
            W[0] = complex_t( 1,  0);
            W[1] = complex_t( 0, -1);
            W[2] = complex_t(-1,  0);
            W[3] = complex_t( 0,  1);
            W[4] = complex_t( 1,  0);
        }
        else if (N < OMP_THRESHOLD_W) for (int p = 0; p <= Ne; p++) {
            const double theta = p * theta0;
            const double c =  cos(theta);
            const double s = -sin(theta);
            W[p]    = complex_t( c,  s);
            W[Nq-p] = complex_t(-s, -c);
            W[Nq+p] = complex_t( s, -c);
            W[Nh-p] = complex_t(-c,  s);
            W[Nh+p] = complex_t(-c, -s);
            W[Nd-p] = complex_t( s,  c);
            W[Nd+p] = complex_t(-s,  c);
            W[N-p]  = complex_t( c, -s);
        }
        else
        #pragma omp parallel for schedule(static)
        for (int p = 0; p <= Ne; p++) {
            const double theta = p * theta0;
            const double c =  cos(theta);
            const double s = -sin(theta);
            W[p]    = complex_t( c,  s);
            W[Nq-p] = complex_t(-s, -c);
            W[Nq+p] = complex_t( s, -c);
            W[Nh-p] = complex_t(-c,  s);
            W[Nh+p] = complex_t(-c, -s);
            W[Nd-p] = complex_t( s,  c);
            W[Nd+p] = complex_t(-s,  c);
            W[N-p]  = complex_t( c, -s);
        }
    }

    // Dummy trig loop; sum is volatile so the work is not optimized away.
    // Presumably a CPU warm-up before timing — TODO confirm intent.
    static inline void speedup_magic(const int N = 1 << 18) noexcept
    {
        const double theta0 = 2*CONSTANT::PI/N;
        volatile double sum = 0;
        for (int p = 0; p < N; p++) {
            sum += cos(p * theta0);
        }
    }

} // namespace OTFFT_MISC

#if defined(USE_SSE2) || defined(USE_AVX) || defined(USE_AVX2)
//=============================================================================
// SSE2/SSE3
//=============================================================================

extern "C" {
#include <emmintrin.h>
}

namespace OTFFT_MISC {

    // one complex<double> packed as (Re, Im) in a 128-bit register
    typedef __m128d xmm;

    // build (x, y) as a packed complex value
    static inline xmm cmplx(const double& x, const double& y) noexcept force_inline;
    static inline xmm cmplx(const double& x, const double& y) noexcept
    {
        return _mm_setr_pd(x, y);
    }

    // load a complex value (aligned unless USE_UNALIGNED_MEMORY)
    static inline xmm getpz(const complex_t& z) noexcept force_inline;
    static inline xmm getpz(const complex_t& z) noexcept
    {
#ifdef USE_UNALIGNED_MEMORY
        return _mm_loadu_pd(&z.Re);
#else
        return _mm_load_pd(&z.Re);
#endif
    }

    static inline xmm getpz(const_double_vector x) noexcept force_inline2;
    static inline xmm getpz(const_double_vector x) noexcept
    {
#ifdef USE_UNALIGNED_MEMORY
        return _mm_loadu_pd(x);
#else
        return _mm_load_pd(x);
#endif
    }

    // store a complex value (aligned unless USE_UNALIGNED_MEMORY)
    static inline void setpz(complex_t& z, const xmm& x) noexcept force_inline3;
    static inline void setpz(complex_t& z, const xmm& x) noexcept
    {
#ifdef USE_UNALIGNED_MEMORY
        _mm_storeu_pd(&z.Re, x);
#else
        _mm_store_pd(&z.Re, x);
#endif
    }

    static inline void setpz(double_vector x, const xmm& z) noexcept force_inline3;
    static inline void setpz(double_vector x, const xmm& z) noexcept
    {
#ifdef USE_UNALIGNED_MEMORY
        _mm_storeu_pd(x, z);
#else
        _mm_store_pd(x, z);
#endif
    }

    // swap two complex values through a register
    static inline void swappz(complex_t& x, complex_t& y) noexcept force_inline3;
    static inline void swappz(complex_t& x, complex_t& y) noexcept
    {
        const xmm z = getpz(x); setpz(x, getpz(y)); setpz(y, z);
    }

    // complex conjugate: flips the sign bit of the imaginary lane
    static inline xmm cnjpz(const xmm& xy) noexcept force_inline;
    static inline xmm cnjpz(const xmm& xy) noexcept
    {
        static const xmm zm = { 0.0, -0.0 };
        return _mm_xor_pd(zm, xy);
    }

    // multiply by j: (x, y) -> (-y, x)
    static inline xmm jxpz(const xmm& xy) noexcept force_inline;
    static inline xmm jxpz(const xmm& xy) noexcept
    {
        const xmm xmy = cnjpz(xy);
        return _mm_shuffle_pd(xmy, xmy, 1);
    }

    // negate both lanes
    static inline xmm negpz(const xmm& xy) noexcept force_inline;
    static inline xmm negpz(const xmm& xy) noexcept
    {
        static const xmm mm = { -0.0, -0.0 };
        return _mm_xor_pd(mm, xy);
    }

    static inline xmm addpz(const xmm& a, const xmm& b) noexcept force_inline;
    static inline xmm addpz(const xmm& a, const xmm& b) noexcept
    {
        return _mm_add_pd(a, b);
    }

    static inline xmm subpz(const xmm& a, const xmm& b) noexcept force_inline;
    static inline xmm subpz(const xmm& a, const xmm& b) noexcept
    {
        return _mm_sub_pd(a, b);
    }

    static inline xmm mulpd(const xmm& a, const xmm& b) noexcept force_inline;
    static inline xmm mulpd(const xmm& a, const xmm& b) noexcept
    {
        return _mm_mul_pd(a, b);
    }

    static inline xmm divpd(const xmm& a, const xmm& b) noexcept force_inline;
    static inline xmm divpd(const xmm& a, const xmm& b) noexcept
    {
        return _mm_div_pd(a, b);
    }

    // compile-time scaling by 1, 1/sqrt(N) or 1/N depending on mode
    template <int N, int mode> static inline xmm scalepz(const xmm& z) noexcept force_inline;
    template <int N, int mode> static inline xmm scalepz(const xmm& z) noexcept
    {
        static const double scale =
            mode == scale_1       ? 1.0           :
            mode == scale_unitary ? 1.0/mysqrt(N) :
            mode == scale_length  ? 1.0/N         : 0.0;
        static const xmm sv = { scale, scale };
        return mode == scale_1 ? z : mulpd(sv, z);
    }

} // namespace OTFFT_MISC

#if defined(USE_AVX) || defined(USE_AVX2)
//-----------------------------------------------------------------------------
// SSE3
//-----------------------------------------------------------------------------

extern "C" {
#include <pmmintrin.h>
#ifdef USE_AVX2
#include <immintrin.h>
#endif
}

namespace OTFFT_MISC {

    static inline xmm haddpz(const xmm& ab, const xmm& xy) noexcept force_inline;
    static inline xmm haddpz(const xmm& ab, const xmm& xy) noexcept
    {
        return _mm_hadd_pd(ab, xy); // (a + b, x + y)
    }

    // complex multiply (a+jb)*(x+jy); uses FMA when AVX2 is available
    static inline xmm mulpz(const xmm& ab, const xmm& xy) noexcept force_inline;
    static inline xmm mulpz(const xmm& ab, const xmm& xy) noexcept
    {
        const xmm aa = _mm_unpacklo_pd(ab, ab);
        const xmm bb = _mm_unpackhi_pd(ab, ab);
        const xmm yx = _mm_shuffle_pd(xy, xy, 1);
#ifdef USE_AVX2
        return _mm_fmaddsub_pd(aa, xy, _mm_mul_pd(bb, yx));
#else
        return _mm_addsub_pd(_mm_mul_pd(aa, xy), _mm_mul_pd(bb, yx));
#endif
    }

    // complex divide: (ab * conj(xy)) / |xy|^2
    static inline xmm divpz(const xmm& ab, const xmm& xy) noexcept force_inline;
    static inline xmm divpz(const xmm& ab, const xmm& xy) noexcept
    {
        const xmm x2y2 = _mm_mul_pd(xy, xy);
        const xmm r2r2 = _mm_hadd_pd(x2y2, x2y2);
        return _mm_div_pd(mulpz(ab, cnjpz(xy)), r2r2);
    }

    // multiply by the 8th root of unity e^(j*pi/4)
    static inline xmm v8xpz(const xmm& xy) noexcept force_inline;
    static inline xmm v8xpz(const xmm& xy) noexcept
    {
        static const xmm rr = { CONSTANT::SQRT1_2, CONSTANT::SQRT1_2 };
        const xmm yx = _mm_shuffle_pd(xy, xy, 1);
        return _mm_mul_pd(rr, _mm_addsub_pd(xy, yx));
    }

} // namespace OTFFT_MISC

#else
//-----------------------------------------------------------------------------
// SSE3 Emulation
//-----------------------------------------------------------------------------

namespace OTFFT_MISC {

    static inline xmm haddpz(const xmm& ab, const xmm& xy) noexcept force_inline;
    static inline xmm haddpz(const xmm& ab, const xmm& xy) noexcept
    {
        const xmm ba  = _mm_shuffle_pd(ab, ab, 1);
        const xmm yx  = _mm_shuffle_pd(xy, xy, 1);
        const xmm apb = _mm_add_sd(ab, ba);
        const xmm xpy = _mm_add_sd(xy, yx);
        return _mm_shuffle_pd(apb, xpy, 0); // (a + b, x + y)
    }

    // complex multiply without SSE3 addsub: a*xy + b*(j*xy)
    static inline xmm mulpz(const xmm& ab, const xmm& xy) noexcept force_inline;
    static inline xmm mulpz(const xmm& ab, const xmm& xy) noexcept
    {
        const xmm aa = _mm_unpacklo_pd(ab, ab);
        const xmm bb = _mm_unpackhi_pd(ab, ab);
        return _mm_add_pd(_mm_mul_pd(aa, xy), _mm_mul_pd(bb, jxpz(xy)));
    }

    static inline xmm divpz(const xmm& ab, const xmm& xy) noexcept force_inline;
    static inline xmm divpz(const xmm& ab, const xmm& xy) noexcept
    {
        const xmm x2y2 = _mm_mul_pd(xy, xy);
        const xmm y2x2 = _mm_shuffle_pd(x2y2, x2y2, 1);
        const xmm r2r2 = _mm_add_pd(x2y2, y2x2);
        return _mm_div_pd(mulpz(ab, cnjpz(xy)), r2r2);
    }

    static inline xmm v8xpz(const xmm& xy) noexcept force_inline;
    static inline xmm v8xpz(const xmm& xy) noexcept
    {
        static const xmm rr = { CONSTANT::SQRT1_2, CONSTANT::SQRT1_2 };
        return _mm_mul_pd(rr, _mm_add_pd(xy, jxpz(xy)));
    }

} // namespace OTFFT_MISC

//-----------------------------------------------------------------------------
#endif // USE_AVX

namespace OTFFT_MISC {

    // multiply by e^(-j*pi/4)
    static inline xmm w8xpz(const xmm& xy) noexcept force_inline;
    static inline xmm w8xpz(const xmm& xy) noexcept
    {
        static const xmm rr = { CONSTANT::SQRT1_2, CONSTANT::SQRT1_2 };
        const xmm ymx = cnjpz(_mm_shuffle_pd(xy, xy, 1));
        return _mm_mul_pd(rr, _mm_add_pd(xy, ymx));
    }

    // multiply by the constant (H1X, H1Y)
    static inline xmm h1xpz(const xmm& xy) noexcept force_inline;
    static inline xmm h1xpz(const xmm& xy) noexcept
    {
        static const xmm h1 = { CONSTANT::H1X, CONSTANT::H1Y };
        return mulpz(h1, xy);
    }

    // multiply by (-H1Y, -H1X)
    static inline xmm h3xpz(const xmm& xy) noexcept force_inline;
    static inline xmm h3xpz(const xmm& xy) noexcept
    {
        static const xmm h3 = { -CONSTANT::H1Y, -CONSTANT::H1X };
        return mulpz(h3, xy);
    }

    // multiply by (H1X, -H1Y), the conjugate of h1
    static inline xmm hfxpz(const xmm& xy) noexcept force_inline;
    static inline xmm hfxpz(const xmm& xy) noexcept
    {
        static const xmm hf = { CONSTANT::H1X, -CONSTANT::H1Y };
        return mulpz(hf, xy);
    }

    // (definition continues past this chunk)
    static inline xmm hdxpz(const xmm& xy) noexcept force_inline;
    static inline
xmm hdxpz(const xmm& xy) noexcept { static const xmm hd = { -CONSTANT::H1Y, CONSTANT::H1X }; return mulpz(hd, xy); } #if !defined(USE_AVX) && !defined(USE_AVX2) static inline void* simd_malloc(const size_t n) { return _mm_malloc(n, 16); } static inline void simd_free(void* p) { _mm_free(p); } #endif } // namespace OTFFT_MISC #else //============================================================================= // SSE2/SSE3 Emulation //============================================================================= #include <cstdlib> namespace OTFFT_MISC { struct xmm { double Re, Im; }; static inline xmm cmplx(const double& x, const double& y) noexcept force_inline; static inline xmm cmplx(const double& x, const double& y) noexcept { const xmm z = { x, y }; return z; } static inline xmm getpz(const complex_t& z) noexcept force_inline; static inline xmm getpz(const complex_t& z) noexcept { const xmm x = { z.Re, z.Im }; return x; } static inline xmm getpz(const_double_vector x) noexcept force_inline; static inline xmm getpz(const_double_vector x) noexcept { const xmm z = { x[0], x[1] }; return z; } static inline void setpz(complex_t& z, const xmm& x) noexcept force_inline3; static inline void setpz(complex_t& z, const xmm& x) noexcept { z.Re = x.Re; z.Im = x.Im; } static inline void setpz(double_vector x, const xmm& z) noexcept force_inline3; static inline void setpz(double_vector x, const xmm& z) noexcept { x[0] = z.Re; x[1] = z.Im; } static inline void swappz(complex_t& x, complex_t& y) noexcept force_inline3; static inline void swappz(complex_t& x, complex_t& y) noexcept { const xmm z = getpz(x); setpz(x, getpz(y)); setpz(y, z); } static inline xmm cnjpz(const xmm& z) noexcept force_inline; static inline xmm cnjpz(const xmm& z) noexcept { const xmm x = { z.Re, -z.Im }; return x; } static inline xmm jxpz(const xmm& z) noexcept force_inline; static inline xmm jxpz(const xmm& z) noexcept { const xmm x = { -z.Im, z.Re }; return x; } static inline xmm negpz(const xmm& z) 
noexcept force_inline; static inline xmm negpz(const xmm& z) noexcept { const xmm x = { -z.Re, -z.Im }; return x; } static inline xmm addpz(const xmm& a, const xmm& b) noexcept force_inline; static inline xmm addpz(const xmm& a, const xmm& b) noexcept { const xmm x = { a.Re + b.Re, a.Im + b.Im }; return x; } static inline xmm subpz(const xmm& a, const xmm& b) noexcept force_inline; static inline xmm subpz(const xmm& a, const xmm& b) noexcept { const xmm x = { a.Re - b.Re, a.Im - b.Im }; return x; } static inline xmm mulpd(const xmm& a, const xmm& b) noexcept force_inline; static inline xmm mulpd(const xmm& a, const xmm& b) noexcept { const xmm x = { a.Re*b.Re, a.Im*b.Im }; return x; } static inline xmm divpd(const xmm& a, const xmm& b) noexcept force_inline; static inline xmm divpd(const xmm& a, const xmm& b) noexcept { const xmm x = { a.Re/b.Re, a.Im/b.Im }; return x; } template <int N, int mode> static inline xmm scalepz(const xmm& z) noexcept force_inline; template <int N, int mode> static inline xmm scalepz(const xmm& z) noexcept { static const double scale = mode == scale_1 ? 1.0 : mode == scale_unitary ? 1.0/mysqrt(N) : mode == scale_length ? 1.0/N : 0.0; static const xmm sv = { scale, scale }; return mode == scale_1 ? 
z : mulpd(sv, z); } static inline xmm mulpz(const xmm& a, const xmm& b) noexcept force_inline; static inline xmm mulpz(const xmm& a, const xmm& b) noexcept { const xmm x = { a.Re*b.Re - a.Im*b.Im, a.Re*b.Im + a.Im*b.Re }; return x; } static inline xmm divpz(const xmm& a, const xmm& b) noexcept force_inline; static inline xmm divpz(const xmm& a, const xmm& b) noexcept { const double b2 = b.Re*b.Re + b.Im*b.Im; const xmm acb = mulpz(a, cnjpz(b)); const xmm x = { acb.Re/b2, acb.Im/b2 }; return x; } static inline xmm haddpz(const xmm& ab, const xmm& xy) noexcept force_inline; static inline xmm haddpz(const xmm& ab, const xmm& xy) noexcept { const xmm x = { ab.Re + ab.Im, xy.Re + xy.Im }; return x; } static inline xmm v8xpz(const xmm& z) noexcept force_inline; static inline xmm v8xpz(const xmm& z) noexcept { const xmm x = { CONSTANT::SQRT1_2*(z.Re - z.Im), CONSTANT::SQRT1_2*(z.Re + z.Im) }; return x; } static inline xmm w8xpz(const xmm& z) noexcept force_inline; static inline xmm w8xpz(const xmm& z) noexcept { const xmm x = { CONSTANT::SQRT1_2*(z.Re + z.Im), CONSTANT::SQRT1_2*(z.Im - z.Re) }; return x; } static inline xmm h1xpz(const xmm& xy) noexcept force_inline; static inline xmm h1xpz(const xmm& xy) noexcept { static const xmm h1 = { CONSTANT::H1X, CONSTANT::H1Y }; return mulpz(h1, xy); } static inline xmm h3xpz(const xmm& xy) noexcept force_inline; static inline xmm h3xpz(const xmm& xy) noexcept { static const xmm h3 = { -CONSTANT::H1Y, -CONSTANT::H1X }; return mulpz(h3, xy); } static inline xmm hfxpz(const xmm& xy) noexcept force_inline; static inline xmm hfxpz(const xmm& xy) noexcept { static const xmm hf = { CONSTANT::H1X, -CONSTANT::H1Y }; return mulpz(hf, xy); } static inline xmm hdxpz(const xmm& xy) noexcept force_inline; static inline xmm hdxpz(const xmm& xy) noexcept { static const xmm hd = { -CONSTANT::H1Y, CONSTANT::H1X }; return mulpz(hd, xy); } static inline void* simd_malloc(const size_t n) { return malloc(n); } static inline void simd_free(void* p) { 
free(p); } } // namespace OTFFT_MISC #endif // USE_SSE2 #if defined(USE_AVX) || defined(USE_AVX2) //============================================================================= // AVX/AVX2 //============================================================================= extern "C" { #include <immintrin.h> } namespace OTFFT_MISC { typedef __m256d ymm; static inline void zeroupper() noexcept force_inline; static inline void zeroupper() noexcept { _mm256_zeroupper(); } static inline ymm cmplx2(const double& a, const double& b, const double& c, const double& d) noexcept force_inline; static inline ymm cmplx2(const double& a, const double& b, const double& c, const double& d) noexcept { return _mm256_setr_pd(a, b, c, d); } static inline ymm cmplx2(const complex_t& x, const complex_t& y) noexcept force_inline; static inline ymm cmplx2(const complex_t& x, const complex_t& y) noexcept { const xmm a = getpz(x); const xmm b = getpz(y); const ymm ax = _mm256_castpd128_pd256(a); const ymm bx = _mm256_castpd128_pd256(b); return _mm256_permute2f128_pd(ax, bx, 0x20); } static inline ymm cmplx3(const complex_t& x, const complex_t& y) noexcept force_inline; static inline ymm cmplx3(const complex_t& x, const complex_t& y) noexcept { #ifdef USE_UNALIGNED_MEMORY const ymm ax = _mm256_loadu_pd(&x.Re); const ymm bx = _mm256_loadu_pd(&y.Re); #else const ymm ax = _mm256_load_pd(&x.Re); const ymm bx = _mm256_load_pd(&y.Re); #endif return _mm256_permute2f128_pd(ax, bx, 0x20); } static inline ymm getpz2(const_complex_vector z) noexcept force_inline2; static inline ymm getpz2(const_complex_vector z) noexcept { #ifdef USE_UNALIGNED_MEMORY return _mm256_loadu_pd(&z->Re); #else return _mm256_load_pd(&z->Re); #endif } static inline void setpz2(complex_vector z, const ymm& x) noexcept force_inline3; static inline void setpz2(complex_vector z, const ymm& x) noexcept { #ifdef USE_UNALIGNED_MEMORY _mm256_storeu_pd(&z->Re, x); #else _mm256_store_pd(&z->Re, x); #endif } static inline ymm cnjpz2(const 
ymm& xy) noexcept force_inline; static inline ymm cnjpz2(const ymm& xy) noexcept { static const ymm zm = { 0.0, -0.0, 0.0, -0.0 }; return _mm256_xor_pd(zm, xy); } static inline ymm jxpz2(const ymm& xy) noexcept force_inline; static inline ymm jxpz2(const ymm& xy) noexcept { const ymm xmy = cnjpz2(xy); return _mm256_shuffle_pd(xmy, xmy, 5); } static inline ymm negpz2(const ymm& xy) noexcept force_inline; static inline ymm negpz2(const ymm& xy) noexcept { static const ymm mm = { -0.0, -0.0, -0.0, -0.0 }; return _mm256_xor_pd(mm, xy); } static inline ymm addpz2(const ymm& a, const ymm& b) noexcept force_inline; static inline ymm addpz2(const ymm& a, const ymm& b) noexcept { return _mm256_add_pd(a, b); } static inline ymm subpz2(const ymm& a, const ymm& b) noexcept force_inline; static inline ymm subpz2(const ymm& a, const ymm& b) noexcept { return _mm256_sub_pd(a, b); } static inline ymm mulpd2(const ymm& a, const ymm& b) noexcept force_inline; static inline ymm mulpd2(const ymm& a, const ymm& b) noexcept { return _mm256_mul_pd(a, b); } static inline ymm divpd2(const ymm& a, const ymm& b) noexcept force_inline; static inline ymm divpd2(const ymm& a, const ymm& b) noexcept { return _mm256_div_pd(a, b); } static inline ymm mulpz2(const ymm& ab, const ymm& xy) noexcept force_inline; static inline ymm mulpz2(const ymm& ab, const ymm& xy) noexcept { const ymm aa = _mm256_unpacklo_pd(ab, ab); const ymm bb = _mm256_unpackhi_pd(ab, ab); const ymm yx = _mm256_shuffle_pd(xy, xy, 5); #ifdef USE_AVX2 return _mm256_fmaddsub_pd(aa, xy, _mm256_mul_pd(bb, yx)); #else return _mm256_addsub_pd(_mm256_mul_pd(aa, xy), _mm256_mul_pd(bb, yx)); #endif } static inline ymm divpz2(const ymm& ab, const ymm& xy) noexcept force_inline; static inline ymm divpz2(const ymm& ab, const ymm& xy) noexcept { const ymm x2y2 = _mm256_mul_pd(xy, xy); const ymm r2r2 = _mm256_hadd_pd(x2y2, x2y2); return _mm256_div_pd(mulpz2(ab, cnjpz2(xy)), r2r2); } template <int N, int mode> static inline ymm scalepz2(const 
ymm& z) noexcept force_inline; template <int N, int mode> static inline ymm scalepz2(const ymm& z) noexcept { static const double scale = mode == scale_1 ? 1.0 : mode == scale_unitary ? 1.0/mysqrt(N) : mode == scale_length ? 1.0/N : 0.0; static const ymm sv = { scale, scale, scale, scale }; return mode == scale_1 ? z : mulpd2(sv, z); } static inline ymm v8xpz2(const ymm& xy) noexcept force_inline; static inline ymm v8xpz2(const ymm& xy) noexcept { static const ymm rr = { CONSTANT::SQRT1_2, CONSTANT::SQRT1_2, CONSTANT::SQRT1_2, CONSTANT::SQRT1_2 }; const ymm yx = _mm256_shuffle_pd(xy, xy, 5); return _mm256_mul_pd(rr, _mm256_addsub_pd(xy, yx)); } static inline ymm w8xpz2(const ymm& xy) noexcept force_inline; static inline ymm w8xpz2(const ymm& xy) noexcept { static const ymm rr = { CONSTANT::SQRT1_2, CONSTANT::SQRT1_2, CONSTANT::SQRT1_2, CONSTANT::SQRT1_2 }; const ymm ymx = cnjpz2(_mm256_shuffle_pd(xy, xy, 5)); return _mm256_mul_pd(rr, _mm256_add_pd(xy, ymx)); } static inline ymm h1xpz2(const ymm& xy) noexcept force_inline; static inline ymm h1xpz2(const ymm& xy) noexcept { static const ymm h1 = { CONSTANT::H1X, CONSTANT::H1Y, CONSTANT::H1X, CONSTANT::H1Y }; return mulpz2(h1, xy); } static inline ymm h3xpz2(const ymm& xy) noexcept force_inline; static inline ymm h3xpz2(const ymm& xy) noexcept { static const ymm h3 = { -CONSTANT::H1Y, -CONSTANT::H1X, -CONSTANT::H1Y, -CONSTANT::H1X }; return mulpz2(h3, xy); } static inline ymm hfxpz2(const ymm& xy) noexcept force_inline; static inline ymm hfxpz2(const ymm& xy) noexcept { static const ymm hf = { CONSTANT::H1X, -CONSTANT::H1Y, CONSTANT::H1X, -CONSTANT::H1Y }; return mulpz2(hf, xy); } static inline ymm hdxpz2(const ymm& xy) noexcept force_inline; static inline ymm hdxpz2(const ymm& xy) noexcept { static const ymm hd = { -CONSTANT::H1Y, CONSTANT::H1X, -CONSTANT::H1Y, CONSTANT::H1X }; return mulpz2(hd, xy); } static inline ymm duppz2(const xmm& x) noexcept force_inline; static inline ymm duppz2(const xmm& x) noexcept { 
return _mm256_broadcast_pd(&x); } static inline ymm duppz3(const complex_t& z) noexcept force_inline; static inline ymm duppz3(const complex_t& z) noexcept { const ymm x = getpz2(&z); return _mm256_permute2f128_pd(x, x, 0); } static inline ymm cat(const xmm& a, const xmm& b) noexcept force_inline; static inline ymm cat(const xmm& a, const xmm& b) noexcept { const ymm ax = _mm256_castpd128_pd256(a); const ymm bx = _mm256_castpd128_pd256(b); return _mm256_permute2f128_pd(ax, bx, 0x20); } static inline ymm catlo(const ymm& ax, const ymm& by) noexcept force_inline; static inline ymm catlo(const ymm& ax, const ymm& by) noexcept { return _mm256_permute2f128_pd(ax, by, 0x20); // == ab } static inline ymm cathi(const ymm& ax, const ymm& by) noexcept force_inline; static inline ymm cathi(const ymm& ax, const ymm& by) noexcept { return _mm256_permute2f128_pd(ax, by, 0x31); // == xy } template <int s> static inline ymm getwp2(const_complex_vector W, const int p) noexcept force_inline2; template <int s> static inline ymm getwp2(const_complex_vector W, const int p) noexcept { const int sp = s*p; return cmplx2(W[sp], W[sp+s]); } template <int s> static inline ymm cnj_getwp2(const_complex_vector W, const int p) noexcept force_inline2; template <int s> static inline ymm cnj_getwp2(const_complex_vector W, const int p) noexcept { const int sp = s*p; return cnjpz2(cmplx2(W[sp], W[sp+s])); } static inline xmm getlo(const ymm& a_b) noexcept force_inline; static inline xmm getlo(const ymm& a_b) noexcept { return _mm256_castpd256_pd128(a_b); // == a } static inline xmm gethi(const ymm& a_b) noexcept force_inline; static inline xmm gethi(const ymm& a_b) noexcept { return _mm256_extractf128_pd(a_b, 1); // == b } template <int s> static inline ymm getpz3(const_complex_vector z) noexcept force_inline2; template <int s> static inline ymm getpz3(const_complex_vector z) noexcept { return cmplx2(z[0], z[s]); } template <int s> static inline void setpz3(complex_vector z, const ymm& x) noexcept 
force_inline3; template <int s> static inline void setpz3(complex_vector z, const ymm& x) noexcept { setpz(z[0], getlo(x)); setpz(z[s], gethi(x)); } static inline void* simd_malloc(const size_t n) { return _mm_malloc(n, 32); } static inline void simd_free(void* p) { _mm_free(p); } } // namespace OTFFT_MISC #else //============================================================================= // AVX/AVX2 Emulation //============================================================================= namespace OTFFT_MISC { struct ymm { xmm lo, hi; }; static inline void zeroupper() noexcept force_inline; static inline void zeroupper() noexcept {} static inline ymm cmplx2(const double& a, const double& b, const double& c, const double &d) noexcept force_inline; static inline ymm cmplx2(const double& a, const double& b, const double& c, const double &d) noexcept { const ymm y = { cmplx(a, b), cmplx(c, d) }; return y; } static inline ymm cmplx2(const complex_t& a, const complex_t& b) noexcept force_inline; static inline ymm cmplx2(const complex_t& a, const complex_t& b) noexcept { const ymm y = { getpz(a), getpz(b) }; return y; } static inline ymm getpz2(const_complex_vector z) noexcept force_inline2; static inline ymm getpz2(const_complex_vector z) noexcept { const ymm y = { getpz(z[0]), getpz(z[1]) }; return y; } static inline void setpz2(complex_vector z, const ymm& y) noexcept force_inline3; static inline void setpz2(complex_vector z, const ymm& y) noexcept { setpz(z[0], y.lo); setpz(z[1], y.hi); } static inline ymm cnjpz2(const ymm& xy) noexcept force_inline; static inline ymm cnjpz2(const ymm& xy) noexcept { const ymm y = { cnjpz(xy.lo), cnjpz(xy.hi) }; return y; } static inline ymm jxpz2(const ymm& xy) noexcept force_inline; static inline ymm jxpz2(const ymm& xy) noexcept { const ymm y = { jxpz(xy.lo), jxpz(xy.hi) }; return y; } static inline ymm addpz2(const ymm& a, const ymm& b) noexcept force_inline; static inline ymm addpz2(const ymm& a, const ymm& b) noexcept { const 
ymm y = { addpz(a.lo, b.lo), addpz(a.hi, b.hi) }; return y; } static inline ymm subpz2(const ymm& a, const ymm& b) noexcept force_inline; static inline ymm subpz2(const ymm& a, const ymm& b) noexcept { const ymm y = { subpz(a.lo, b.lo), subpz(a.hi, b.hi) }; return y; } static inline ymm mulpd2(const ymm& a, const ymm& b) noexcept force_inline; static inline ymm mulpd2(const ymm& a, const ymm& b) noexcept { const ymm y = { mulpd(a.lo, b.lo), mulpd(a.hi, b.hi) }; return y; } static inline ymm divpd2(const ymm& a, const ymm& b) noexcept force_inline; static inline ymm divpd2(const ymm& a, const ymm& b) noexcept { const ymm y = { divpd(a.lo, b.lo), divpd(a.hi, b.hi) }; return y; } static inline ymm mulpz2(const ymm& a, const ymm& b) noexcept force_inline; static inline ymm mulpz2(const ymm& a, const ymm& b) noexcept { const ymm y = { mulpz(a.lo, b.lo), mulpz(a.hi, b.hi) }; return y; } static inline ymm divpz2(const ymm& a, const ymm& b) noexcept force_inline; static inline ymm divpz2(const ymm& a, const ymm& b) noexcept { const ymm y = { divpz(a.lo, b.lo), divpz(a.hi, b.hi) }; return y; } template <int N, int mode> static inline ymm scalepz2(const ymm& z) noexcept force_inline; template <int N, int mode> static inline ymm scalepz2(const ymm& z) noexcept { static const double scale = mode == scale_1 ? 1.0 : mode == scale_unitary ? 1.0/mysqrt(N) : mode == scale_length ? 1.0/N : 0.0; static const xmm sv = { scale, scale }; static const ymm sv2 = { sv, sv }; return mode == scale_1 ? 
z : mulpd2(sv2, z); } static inline ymm v8xpz2(const ymm& xy) noexcept force_inline; static inline ymm v8xpz2(const ymm& xy) noexcept { const ymm y = { v8xpz(xy.lo), v8xpz(xy.hi) }; return y; } static inline ymm w8xpz2(const ymm& xy) noexcept force_inline; static inline ymm w8xpz2(const ymm& xy) noexcept { const ymm y = { w8xpz(xy.lo), w8xpz(xy.hi) }; return y; } static inline ymm h1xpz2(const ymm& xy) noexcept force_inline; static inline ymm h1xpz2(const ymm& xy) noexcept { const ymm y = { h1xpz(xy.lo), h1xpz(xy.hi) }; return y; } static inline ymm h3xpz2(const ymm& xy) noexcept force_inline; static inline ymm h3xpz2(const ymm& xy) noexcept { const ymm y = { h3xpz(xy.lo), h3xpz(xy.hi) }; return y; } static inline ymm hfxpz2(const ymm& xy) noexcept force_inline; static inline ymm hfxpz2(const ymm& xy) noexcept { const ymm y = { hfxpz(xy.lo), hfxpz(xy.hi) }; return y; } static inline ymm hdxpz2(const ymm& xy) noexcept force_inline; static inline ymm hdxpz2(const ymm& xy) noexcept { const ymm y = { hdxpz(xy.lo), hdxpz(xy.hi) }; return y; } static inline ymm duppz2(const xmm& x) noexcept force_inline; static inline ymm duppz2(const xmm& x) noexcept { const ymm y = { x, x }; return y; } static inline ymm duppz3(const complex_t& z) noexcept force_inline; static inline ymm duppz3(const complex_t& z) noexcept { const xmm x = getpz(z); const ymm y = { x, x }; return y; } static inline ymm cat(const xmm& a, const xmm& b) noexcept force_inline; static inline ymm cat(const xmm& a, const xmm& b) noexcept { const ymm y = { a, b }; return y; } static inline ymm catlo(const ymm& ax, const ymm& by) noexcept force_inline; static inline ymm catlo(const ymm& ax, const ymm& by) noexcept { const ymm ab = { ax.lo, by.lo }; return ab; } static inline ymm cathi(const ymm& ax, const ymm& by) noexcept force_inline; static inline ymm cathi(const ymm& ax, const ymm& by) noexcept { const ymm xy = { ax.hi, by.hi }; return xy; } template <int s> static inline ymm getwp2(const_complex_vector W, 
const int p) noexcept force_inline2; template <int s> static inline ymm getwp2(const_complex_vector W, const int p) noexcept { const int sp = s*p; return cmplx2(W[sp], W[sp+s]); } template <int s> static inline ymm cnj_getwp2(const_complex_vector W, const int p) noexcept force_inline2; template <int s> static inline ymm cnj_getwp2(const_complex_vector W, const int p) noexcept { const int sp = s*p; return cnjpz2(cmplx2(W[sp], W[sp+s])); } static inline xmm getlo(const ymm& a_b) noexcept force_inline; static inline xmm getlo(const ymm& a_b) noexcept { return a_b.lo; } static inline xmm gethi(const ymm& a_b) noexcept force_inline; static inline xmm gethi(const ymm& a_b) noexcept { return a_b.hi; } template <int s> static inline ymm getpz3(const_complex_vector z) noexcept force_inline2; template <int s> static inline ymm getpz3(const_complex_vector z) noexcept { return cmplx2(z[0], z[s]); } template <int s> static inline void setpz3(complex_vector z, const ymm& y) noexcept force_inline3; template <int s> static inline void setpz3(complex_vector z, const ymm& y) noexcept { setpz(z[0], getlo(y)); setpz(z[s], gethi(y)); } } // namespace OTFFT_MISC #endif // USE_AVX //============================================================================= // Aligned Memory Allocator //============================================================================= namespace OTFFT_MISC { template <class T> struct simd_array { T* p; simd_array() noexcept : p(0) {} simd_array(int n) : p((T*) simd_malloc(n*sizeof(T))) { if (p == 0) throw std::bad_alloc(); } ~simd_array() { if (p) simd_free(p); } void setup(int n) { if (p) simd_free(p); p = (T*) simd_malloc(n*sizeof(T)); if (p == 0) throw std::bad_alloc(); } void destroy() { if (p) simd_free(p); p = 0; } T& operator[](int i) noexcept { return p[i]; } const T& operator[](int i) const noexcept { return p[i]; } T* operator&() const noexcept { return p; } }; } // namespace OTFFT_MISC 
//=============================================================================
test.c
#include <stdio.h>
#include "../utilities/check.h"

#define N 100

/*
 * Offloading test for the OpenMP `atomic` construct.
 *
 * Each section initialises shared data on the host, runs N concurrent
 * atomic operations inside a `target` region, and verifies the result on
 * the host.  Covered variants: implicit update, explicit `update`,
 * `capture`, `write`, `read`, update across multiple teams, and `seq_cst`.
 *
 * Per-section protocol: on success print "successful"; on failure print a
 * diagnostic (bumping `error`) and reset `fail` so later sections still run.
 * Returns the total error count (0 on full success).
 */
int main() {
  // NOTE(review): presumably verifies a target device is available and
  // selects it -- defined in ../utilities/check.h, not visible here.
  check_offloading();

  // Initialisation
  int fail = 0;
  int error = 0;   // cumulative error count, also the exit status
  int a[N];
  int b[N];

  /*
   * Atomics update (implicit)
   * `#pragma omp atomic` with no clause defaults to `update`.
   */
  // Initialise
  a[0] = 0;
  // Execute
  #pragma omp target map(tofrom: a[:1])
  {
    #pragma omp parallel for
    for(int ii = 0; ii < N; ++ii)
      #pragma omp atomic
      a[0]++;
  }
  // Check result: N threads each add 1, so a[0] must be exactly N.
  int result = a[0];
  int expect = N;
  if (result != expect) {
    printf("update (implicit) a %d != %d (error %d)\n", result, expect, ++error);
    fail = 1;
  }
  if(!fail) printf("successful\n"); else fail = 0;

  /*
   * Atomics update (explicit)
   * Same as above but spelling out the `update` clause.
   */
  // Initialise
  a[0] = 0;
  // Execute
  #pragma omp target map(tofrom: a[:1])
  {
    #pragma omp parallel for
    for(int ii = 0; ii < N; ++ii)
      #pragma omp atomic update
      a[0]++;
  }
  // Check result
  result = a[0];
  expect = N;
  if (result != expect) {
    printf("update (explicit) a %d != %d (error %d)\n", result, expect, ++error);
    fail = 1;
  }
  if(!fail) printf("successful\n"); else fail = 0;

  /*
   * Atomic capture
   * Each iteration atomically post-increments a[0] and records the value
   * it observed into b[ii].
   */
  // Initialisation: b is poisoned with -1 so a missed capture is detectable.
  a[0] = 0;
  for(int ii = 0; ii < N; ++ii)
    b[ii] = -1;
  // Execute
  #pragma omp target map(tofrom: a[:1], b[:N])
  {
    #pragma omp parallel for
    for(int ii = 0; ii < N; ++ii) {
      int v = 0;
      #pragma omp atomic capture
      v = a[0]++;
      b[ii] = v;
    }
  }
  // Check result
  result = a[0];
  expect = N;
  if (result != expect) {
    printf("capture a %d != %d (error %d)\n", result, expect, ++error);
    fail = 1;
  }
  // Make sure every increment was captured, regardless of order.
  // Captures may land in any b[jj], so for each expected value 0..N-1 we
  // scan the whole array: each value must appear exactly once overall
  // (here we only verify presence).
  for(int ii = 0; ii < N; ++ii) {
    int pass = 0;
    expect = ii;
    for(int jj = 0; jj < N; ++jj) {
      result = b[jj];
      if(result == expect) pass = 1;
    }
    if (!pass) {
      printf("capture b %d not captured (error %d)\n", expect, ++error);
      fail = 1;
    }
  }
  if(!fail) printf("successful\n"); else fail = 0;

  /*
   * Atomic write
   * Distinct indices per iteration; exercises the `write` form itself.
   */
  // Initialisation
  for(int ii = 0; ii < N; ++ii)
    a[ii] = 0;
  // Execute
  #pragma omp target map(tofrom: a[:N])
  {
    #pragma omp parallel for
    for(int ii = 0; ii < N; ++ii)
      #pragma omp atomic write
      a[ii] = ii;
  }
  // Check result
  for(int ii = 0; ii < N; ++ii) {
    result = a[ii];
    expect = ii;
    if (result != expect) {
      printf("write a %d != %d (error %d)\n", result, expect, ++error);
      fail = 1;
    }
  }
  if(!fail) printf("successful\n"); else fail = 0;

  /*
   * Atomic read
   * a[ii] is atomically loaded from b[ii].
   */
  // Initialisation
  for(int ii = 0; ii < N; ++ii) {
    a[ii] = 0;
    b[ii] = ii;
  }
  // Execute
  #pragma omp target map(tofrom: a[:N], b[:N])
  {
    #pragma omp parallel for
    for(int ii = 0; ii < N; ++ii)
      #pragma omp atomic read
      a[ii] = b[ii];
  }
  // Check result
  for(int ii = 0; ii < N; ++ii) {
    result = a[ii];
    expect = b[ii];
    if (result != expect) {
      printf("ar a %d != %d (error %d)\n", result, expect, ++error);
      fail = 1;
    }
  }
  if(!fail) printf("successful\n"); else fail = 0;

  /*
   * Atomics update with multiple teams
   * Atomicity must hold across teams, not just within one team.
   */
  // Initialise
  a[0] = 0;
  // Execute
  #pragma omp target map(tofrom: a[:1])
  {
    #pragma omp teams num_teams(10) thread_limit(10)
    #pragma omp distribute parallel for
    for(int ii = 0; ii < N; ++ii)
      #pragma omp atomic
      a[0]++;
  }
  // Check result
  result = a[0];
  expect = N;
  if (result != expect) {
    printf("Multi Team a %d != %d (error %d)\n", result, expect, ++error);
    fail = 1;
  }
  if(!fail) printf("successful\n"); else fail = 0;

  /*
   * Atomics seq_cst
   * Sequentially-consistent flavour of the atomic update.
   */
  // Initialise
  a[0] = 0;
  // Execute
  #pragma omp target map(tofrom: a[:1])
  {
    #pragma omp parallel for
    for(int ii = 0; ii < N; ++ii)
      #pragma omp atomic seq_cst
      a[0]++;
  }
  // Check result
  result = a[0];
  expect = N;
  if (result != expect) {
    printf("Using seq_cst a %d != %d (error %d)\n", result, expect, ++error);
    fail = 1;
  }
  if(!fail) printf("successful\n"); else fail = 0;

  // Report
  printf("done with %d errors\n", error);
  return error;
}
vednnConvolutionBackwardData.c
#include "vednnConvolutionBackwardData.h"
#include <stdint.h>
#ifdef VEDNN_USE_OPENMP
#include <omp.h>
extern int __vednn_omp_num_threads ;
#endif

/*
 * Splits the mini-batch across OpenMP threads and invokes the selected
 * backward-data kernel `pFunc` on each thread's batch slice.
 *
 * Batch partitioning: each thread gets allBatch/nthreads images, with the
 * first (allBatch % nthreads) threads taking one extra.  Per-thread copies
 * of the tensor params are made so only `batch` differs; the data pointers
 * are offset by batchBegin * C * H * W floats.
 *
 * NOTE(review): the `#pragma omp parallel` region is commented out, and
 * omp_get_num_threads() returns 1 when called from a serial region, so as
 * written this degenerates to a single full-batch call of pFunc -- confirm
 * whether the parallel region was disabled intentionally.
 * NOTE(review): omp_get_num_threads()/omp_get_thread_num() are called
 * unconditionally, but <omp.h> is only included under VEDNN_USE_OPENMP --
 * verify this file is only built with OpenMP enabled.
 * NOTE(review): data is offset as float* -- assumes single-precision
 * tensors; confirm against vednnTensorParam_t's dtype handling.
 */
static inline vednnError_t
vednnConvolutionBackwardData_wrapper(
    vednnConvBackwardData_t		pFunc,
    const vednnTensorParam_t		*pParamGradOut,
    const void				*pDataGradOut,
    const vednnFilterParam_t		*pParamKernel,
    const void				*pDataKernel,
    const vednnConvolutionParam_t	*pParamConv,
    const vednnTensorParam_t		*pParamGradIn,
    void				*pDataGradIn )
{
//#ifdef VEDNN_USE_OPENMP
//  if ( __vednn_omp_num_threads == 1 ) {
//    return pFunc(pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
//                 pParamConv, pParamGradIn, pDataGradIn );
//  }
//  else {
    vednnError_t rc = VEDNN_SUCCESS ;
//#pragma omp parallel reduction(|:rc)
//    {
      int64_t nthreads = omp_get_num_threads() ;
      int64_t threadid = omp_get_thread_num() ;

      int64_t allBatch = pParamGradOut->batch ;

      // Even split plus one extra image for the first `remain` threads.
      int64_t nBatch = allBatch / nthreads ;
      int64_t remain = allBatch % nthreads ;

      int64_t batchBegin = nBatch * threadid + ( threadid < remain ? threadid : remain ) ;
      int64_t myBatch    = nBatch + ( threadid < remain ? 1 : 0 ) ;

      if( myBatch == 0 ) {
        // More threads than images: nothing to do for this thread.
        rc |= VEDNN_SUCCESS ;
      }
      else {
        // Shallow copies with batch overridden to this thread's slice size.
        vednnTensorParam_t _pParamGradOut = *pParamGradOut; _pParamGradOut.batch = myBatch ;
        vednnTensorParam_t _pParamGradIn  = *pParamGradIn ; _pParamGradIn.batch = myBatch ;
        // Advance data pointers to the start of this thread's slice
        // (batchBegin images of channel*height*width floats each).
        float* _pDataGradOut = ((float *)pDataGradOut) + batchBegin * pParamGradOut->channel * pParamGradOut->height * pParamGradOut->width ;
        float* _pDataGradIn  = ((float *)pDataGradIn)  + batchBegin * pParamGradIn->channel  * pParamGradIn->height  * pParamGradIn->width ;

        rc |= pFunc(&_pParamGradOut, (void*)_pDataGradOut, pParamKernel, pDataKernel,
                    pParamConv, &_pParamGradIn, (void*) _pDataGradIn) ;
      }
//    }
    return rc ;
//  }
//#else
//  return pFunc(pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
//               pParamConv, pParamGradIn, pDataGradIn );
//#endif
}

/* ----------------------------------------------------------------------- */

/*
 * Public entry point for the backward-data convolution.
 *
 * Dispatches to a specialised direct kernel based on the convolution
 * geometry (stride, dilation, padding, kernel size, spatial width, and
 * pointer alignment), falling back to progressively more generic
 * implementations.  Only VEDNN_CONV_ALGORITHM_DIRECT is supported; any
 * other algorithm yields VEDNN_ERROR_INVALID_PARAM.
 *
 * Naming of the specialised kernels (inferred from the suffixes -- the
 * implementations are in other translation units):
 *   dil1/str1/str2 = dilation/stride fixed, pad0/pad2/padsame = padding,
 *   kerN = NxN kernel, iwU128/owU128 = input/output width <= 128,
 *   vecC = vectorised over channels, ioaligned = 8-byte aligned data.
 */
vednnError_t vednnConvolutionBackwardData(
    const vednnTensorParam_t 		*pParamGradOut,
    const void 				*pDataGradOut,
    const vednnFilterParam_t		*pParamKernel,
    const void 				*pDataKernel,
    const vednnTensorParam_t 		*pParamGradIn,
    void 				*pDataGradIn,
    const vednnConvolutionParam_t	*pParamConv,
    vednnConvolutionAlgorithm_t 	algo )
{
  if (algo == VEDNN_CONV_ALGORITHM_DIRECT) {
    // [todo] add variations
    // Tiny spatial extent (or fewer pixels than channels): vectorise over
    // the channel dimension instead of the spatial one.
    if ( pParamGradIn->height * pParamGradIn->width <= 16 ||
         ( pParamGradIn->height * pParamGradIn->width < 64
           && pParamGradIn->height * pParamGradIn->width < pParamGradIn->channel )) {
      return vednnConvolutionBackwardData_wrapper(
          vednnConvolutionBackwardData_direct_vecC,
          pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
          pParamConv, pParamGradIn, pDataGradIn ) ;
    }
    else if (pParamConv->strideHeight == 1 && pParamConv->strideWidth == 1
             && pParamConv->dilationHeight == 1 && pParamConv->dilationWidth == 1 ) {
      // stride 1, dilation 1
      if( pParamGradIn->height == pParamGradOut->height
          && pParamGradIn->width == pParamGradOut->width ) {
        // Equal in/out spatial size => "same" padding; pick by kernel size.
        if( pParamKernel->height == 5 && pParamKernel->width == 5) {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_dil1_str1_padsame_ker5,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn ) ;
        }
        else if( pParamKernel->height == 3 && pParamKernel->width == 3) {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_dil1_str1_padsame_ker3,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn ) ;
        }
        else if( pParamKernel->height == 2 && pParamKernel->width == 2) {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_dil1_str1_padsame_ker2,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn ) ;
        }
        else if( pParamKernel->height == 1 && pParamKernel->width == 1) {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_dil1_str1_padsame_ker1,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn ) ;
        }
        else {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_dil1_str1_padsame,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn ) ;
        }
      }
      // No padding, 3x3 kernel, even widths up to 256, 8-byte-aligned data:
      // specialised 2-pixel-at-a-time kernels.
      else if( pParamConv->padHeight == 0 && pParamConv->padWidth == 0
               && pParamKernel->height == 3 && pParamKernel->width == 3
               && (pParamGradIn->width & 0x01) == 0 && pParamGradIn->width <=256
               && (pParamGradOut->width & 0x01) == 0
               && (((uint64_t)pDataGradIn) & 0x07) == 0
               && (((uint64_t)pDataGradOut) & 0x07) == 0 ) {
        if( pParamGradIn->width <=32 ) {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_dil1_str1_pad0_ker3_iw2XU32_ow2X_ioaligned,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn ) ;
        }
        else {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_dil1_str1_pad0_ker3_iw2XU256_ow2X_ioaligned,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn ) ;
        }
      }
      else if (pParamGradIn->width <= 128) {
        // Narrow input: width-bounded kernels.
        if( pParamConv->padHeight == 0 && pParamConv->padWidth == 0
            && pParamKernel->height == 3 && pParamKernel->width == 3 ) {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_dil1_str1_pad0_ker3_iwU128,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn ) ;
        }
        else {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_dil1_str1_iwU128,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn ) ;
        }
      }
      else {
        // Generic stride-1 / dilation-1 kernel.
        return vednnConvolutionBackwardData_wrapper(
            vednnConvolutionBackwardData_direct_dil1_str1,
            pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
            pParamConv, pParamGradIn, pDataGradIn ) ;
      }
    }
    else {
      // Strided and/or dilated cases.
      if( pParamConv->dilationHeight == 1 && pParamConv->dilationWidth == 1
          && pParamConv->padHeight == 0 && pParamConv->padWidth == 0
          && pParamKernel->height == 1 && pParamKernel->width == 1
          && pParamGradOut->width <= 128 ) {
        return vednnConvolutionBackwardData_wrapper(
            vednnConvolutionBackwardData_direct_dil1_pad0_ker1_owU128,
            pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
            pParamConv, pParamGradIn, pDataGradIn ) ;
      }
      // 5x5 kernel, stride 2, pad 2, dilation 1 (common downsampling conv).
      if( pParamKernel->height == 5 && pParamKernel->width == 5
          && pParamConv->dilationHeight == 1 && pParamConv->dilationWidth == 1
          && pParamConv->strideHeight == 2 && pParamConv->strideWidth == 2
          && pParamConv->padHeight == 2 && pParamConv->padWidth == 2 ) {
        if (pParamGradIn->width <= 128) {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_dil1_str2_pad2_ker5_iwU128,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn ) ;
        }
        else {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_dil1_str2_pad2_ker5,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn ) ;
        }
      }
      // no else -- the two `if`s above return on their own paths
      if (pParamGradIn->width <= 128) {
        if( pParamKernel->height == 3 && pParamKernel->width == 3 ) {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_ker3_iwU128,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn ) ;
        }
        else if( pParamKernel->height == 5 && pParamKernel->width == 5 ) {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_ker5_iwU128,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn ) ;
        }
        else {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_iwU128,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn ) ;
        }
      }
      else {
        if( pParamKernel->height == 5 && pParamKernel->width == 5 ) {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_ker5,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn ) ;
        }
        else {
          // Fully generic fallback.
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_default,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn ) ;
        }
      }
    }
  }
  else {
    return VEDNN_ERROR_INVALID_PARAM ;
  }
}
nanort.h
// // NanoRT, single header only modern ray tracing kernel. // // // Notes : The number of primitives are up to 2G. If you want to render large // data, please split data into chunks(~ 2G prims) and use NanoSG scene graph // library(`${nanort}/examples/nanosg`). // /* The MIT License (MIT) Copyright (c) 2015 - 2019 Light Transport Entertainment, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef NANORT_H_ #define NANORT_H_ #include <algorithm> #include <cassert> #include <cmath> #include <cstdio> #include <cstdlib> #include <cstring> #include <functional> #include <limits> #include <memory> #include <queue> #include <string> #include <vector> // compiler macros // // NANORT_USE_CPP11_FEATURE : Enable C++11 feature // NANORT_ENABLE_PARALLEL_BUILD : Enable parallel BVH build. // NANORT_ENABLE_SERIALIZATION : Enable serialization feature for built BVH. // // Parallelized BVH build is supported on C++11 thread version. // OpenMP version is not fully tested. 
// thus turn off if you face a problem when building BVH in parallel. // #define NANORT_ENABLE_PARALLEL_BUILD // Some constants #define kNANORT_MAX_STACK_DEPTH (512) #define kNANORT_MIN_PRIMITIVES_FOR_PARALLEL_BUILD (1024 * 8) #define kNANORT_SHALLOW_DEPTH (4) // will create 2**N subtrees #ifdef NANORT_USE_CPP11_FEATURE // Assume C++11 compiler has thread support. // In some situation (e.g. embedded system, JIT compilation), thread feature // may not be available though... #include <atomic> #include <mutex> #include <thread> #define kNANORT_MAX_THREADS (256) // Parallel build should work well for C++11 version, thus force enable it. #ifndef NANORT_ENABLE_PARALLEL_BUILD #define NANORT_ENABLE_PARALLEL_BUILD #endif #endif namespace nanort { // RayType typedef enum { RAY_TYPE_NONE = 0x0, RAY_TYPE_PRIMARY = 0x1, RAY_TYPE_SECONDARY = 0x2, RAY_TYPE_DIFFUSE = 0x4, RAY_TYPE_REFLECTION = 0x8, RAY_TYPE_REFRACTION = 0x10 } RayType; #ifdef __clang__ #pragma clang diagnostic push #if __has_warning("-Wzero-as-null-pointer-constant") #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" #endif #endif // ---------------------------------------------------------------------------- // Small vector class useful for multi-threaded environment. // // stack_container.h // // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // This allocator can be used with STL containers to provide a stack buffer // from which to allocate memory and overflows onto the heap. This stack buffer // would be allocated on the stack and allows us to avoid heap operations in // some situations. // // STL likes to make copies of allocators, so the allocator itself can't hold // the data. Instead, we make the creator responsible for creating a // StackAllocator::Source which contains the data. 
// Copying the allocator
// merely copies the pointer to this shared source, so all allocators created
// based on our allocator will share the same stack buffer.
//
// This stack buffer implementation is very simple. The first allocation that
// fits in the stack buffer will use the stack buffer. Any subsequent
// allocations will not use the stack buffer, even if there is unused room.
// This makes it appropriate for array-like containers, but the caller should
// be sure to reserve() in the container up to the stack buffer size. Otherwise
// the container will allocate a small array which will "use up" the stack
// buffer.
template <typename T, size_t stack_capacity>
class StackAllocator : public std::allocator<T> {
 public:
  // NOTE(review): std::allocator<T>::pointer / size_type are deprecated in
  // C++17 and removed in C++20; kept as-is for pre-C++17 compatibility.
  typedef typename std::allocator<T>::pointer pointer;
  typedef typename std::allocator<T>::size_type size_type;

  // Backing store for the allocator. The container owner is responsible for
  // maintaining this for as long as any containers using this allocator are
  // live.
  struct Source {
    Source() : used_stack_buffer_(false) {}

    // Casts the buffer in its right type.
    T *stack_buffer() { return reinterpret_cast<T *>(stack_buffer_); }
    const T *stack_buffer() const {
      return reinterpret_cast<const T *>(stack_buffer_);
    }

    //
    // IMPORTANT: Take care to ensure that stack_buffer_ is aligned
    // since it is used to mimic an array of T.
    // Be careful while declaring any unaligned types (like bool)
    // before stack_buffer_.
    //

    // The buffer itself. It is not of type T because we don't want the
    // constructors and destructors to be automatically called. Define a POD
    // buffer of the right size instead.
    char stack_buffer_[sizeof(T[stack_capacity])];

    // Set when the stack buffer is used for an allocation. We do not track
    // how much of the buffer is used, only that somebody is using it.
    bool used_stack_buffer_;
  };

  // Used by containers when they want to refer to an allocator of type U.
  template <typename U>
  struct rebind {
    typedef StackAllocator<U, stack_capacity> other;
  };

  // For the straight up copy c-tor, we can share storage.
  StackAllocator(const StackAllocator<T, stack_capacity> &rhs)
      : source_(rhs.source_) {}

  // ISO C++ requires the following constructor to be defined,
  // and std::vector in VC++2008SP1 Release fails with an error
  // in the class _Container_base_aux_alloc_real (from <xutility>)
  // if the constructor does not exist.
  // For this constructor, we cannot share storage; there's
  // no guarantee that the Source buffer of Ts is large enough
  // for Us.
  // TODO(Google): If we were fancy pants, perhaps we could share storage
  // iff sizeof(T) == sizeof(U).
  template <typename U, size_t other_capacity>
  StackAllocator(const StackAllocator<U, other_capacity> &other)
      : source_(NULL) {
    (void)other;
  }

  explicit StackAllocator(Source *source) : source_(source) {}

  // Actually do the allocation. Use the stack buffer if nobody has used it yet
  // and the size requested fits. Otherwise, fall through to the standard
  // allocator.
  pointer allocate(size_type n, void *hint = 0) {
    if (source_ != NULL && !source_->used_stack_buffer_ &&
        n <= stack_capacity) {
      source_->used_stack_buffer_ = true;
      return source_->stack_buffer();
    } else {
      return std::allocator<T>::allocate(n, hint);
    }
  }

  // Free: when trying to free the stack buffer, just mark it as free. For
  // non-stack-buffer pointers, just fall though to the standard allocator.
  void deallocate(pointer p, size_type n) {
    if (source_ != NULL && p == source_->stack_buffer())
      source_->used_stack_buffer_ = false;
    else
      std::allocator<T>::deallocate(p, n);
  }

 private:
  Source *source_;
};

// A wrapper around STL containers that maintains a stack-sized buffer that the
// initial capacity of the vector is based on. Growing the container beyond the
// stack capacity will transparently overflow onto the heap. The container must
// support reserve().
//
// WATCH OUT: the ContainerType MUST use the proper StackAllocator for this
// type. This object is really intended to be used only internally. You'll want
// to use the wrappers below for different types.
template <typename TContainerType, int stack_capacity>
class StackContainer {
 public:
  typedef TContainerType ContainerType;
  typedef typename ContainerType::value_type ContainedType;
  typedef StackAllocator<ContainedType, stack_capacity> Allocator;

  // Allocator must be constructed before the container!
  StackContainer() : allocator_(&stack_data_), container_(allocator_) {
    // Make the container use the stack allocation by reserving our buffer size
    // before doing anything else.
    container_.reserve(stack_capacity);
  }

  // Getters for the actual container.
  //
  // Danger: any copies of this made using the copy constructor must have
  // shorter lifetimes than the source. The copy will share the same allocator
  // and therefore the same stack buffer as the original. Use std::copy to
  // copy into a "real" container for longer-lived objects.
  ContainerType &container() { return container_; }
  const ContainerType &container() const { return container_; }

  // Support operator-> to get to the container. This allows nicer syntax like:
  //   StackContainer<...> foo;
  //   std::sort(foo->begin(), foo->end());
  ContainerType *operator->() { return &container_; }
  const ContainerType *operator->() const { return &container_; }

#ifdef UNIT_TEST
  // Retrieves the stack source so that that unit tests can verify that the
  // buffer is being used properly.
  const typename Allocator::Source &stack_data() const { return stack_data_; }
#endif

 protected:
  typename Allocator::Source stack_data_;
  // NOTE(review): pad_ appears intended to align allocator_/container_ after
  // stack_data_ — confirm against original stack_container.h layout.
  unsigned char pad_[7];
  Allocator allocator_;
  ContainerType container_;

  // DISALLOW_EVIL_CONSTRUCTORS(StackContainer);
  StackContainer(const StackContainer &);
  void operator=(const StackContainer &);
};

// StackVector
//
// Example:
//   StackVector<int, 16> foo;
//   foo->push_back(22);  // we have overloaded operator->
//   foo[0] = 10;         // as well as operator[]
template <typename T, size_t stack_capacity>
class StackVector
    : public StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
                            stack_capacity> {
 public:
  StackVector()
      : StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
                       stack_capacity>() {}

  // We need to put this in STL containers sometimes, which requires a copy
  // constructor. We can't call the regular copy constructor because that will
  // take the stack buffer from the original. Here, we create an empty object
  // and make a stack buffer of its own.
  StackVector(const StackVector<T, stack_capacity> &other)
      : StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
                       stack_capacity>() {
    this->container().assign(other->begin(), other->end());
  }

  StackVector<T, stack_capacity> &operator=(
      const StackVector<T, stack_capacity> &other) {
    this->container().assign(other->begin(), other->end());
    return *this;
  }

  // Vectors are commonly indexed, which isn't very convenient even with
  // operator-> (using "->at()" does exception stuff we don't want).
  T &operator[](size_t i) { return this->container().operator[](i); }
  const T &operator[](size_t i) const {
    return this->container().operator[](i);
  }
};

// ----------------------------------------------------------------------------

// Minimal 3-component vector class used throughout the BVH/ray code.
template <typename T = float>
class real3 {
 public:
  real3() {}
  real3(T x) {
    v[0] = x;
    v[1] = x;
    v[2] = x;
  }
  real3(T xx, T yy, T zz) {
    v[0] = xx;
    v[1] = yy;
    v[2] = zz;
  }
  explicit real3(const T *p) {
    v[0] = p[0];
    v[1] = p[1];
    v[2] = p[2];
  }

  inline T x() const { return v[0]; }
  inline T y() const { return v[1]; }
  inline T z() const { return v[2]; }

  real3 operator*(T f) const { return real3(x() * f, y() * f, z() * f); }
  real3 operator-(const real3 &f2) const {
    return real3(x() - f2.x(), y() - f2.y(), z() - f2.z());
  }
  real3 operator*(const real3 &f2) const {
    return real3(x() * f2.x(), y() * f2.y(), z() * f2.z());
  }
  real3 operator+(const real3 &f2) const {
    return real3(x() + f2.x(), y() + f2.y(), z() + f2.z());
  }
  real3 &operator+=(const real3 &f2) {
    v[0] += f2.x();
    v[1] += f2.y();
    v[2] += f2.z();
    return (*this);
  }
  real3 operator/(const real3 &f2) const {
    return real3(x() / f2.x(), y() / f2.y(), z() / f2.z());
  }
  real3 operator-() const { return real3(-x(), -y(), -z()); }
  T operator[](int i) const { return v[i]; }
  T &operator[](int i) { return v[i]; }

  T v[3];
  // T pad;  // for alignment (when T = float)
};

// Scalar-on-the-left multiply: f * v.
template <typename T>
inline real3<T> operator*(T f, const real3<T> &v) {
  return real3<T>(v.x() * f, v.y() * f, v.z() * f);
}

// Component-wise negation.
template <typename T>
inline real3<T> vneg(const real3<T> &rhs) {
  return real3<T>(-rhs.x(), -rhs.y(), -rhs.z());
}

// Euclidean length.
template <typename T>
inline T vlength(const real3<T> &rhs) {
  return std::sqrt(rhs.x() * rhs.x() + rhs.y() * rhs.y() + rhs.z() * rhs.z());
}

// Returns a unit-length copy of `rhs`; near-zero vectors are returned
// unscaled to avoid division by (almost) zero.
template <typename T>
inline real3<T> vnormalize(const real3<T> &rhs) {
  real3<T> v = rhs;
  T len = vlength(rhs);
  if (std::fabs(len) > std::numeric_limits<T>::epsilon()) {
    T inv_len = static_cast<T>(1.0) / len;
    v.v[0] *= inv_len;
    v.v[1] *= inv_len;
    v.v[2] *= inv_len;
  }
  return v;
}

// Cross product a x b.
template <typename T>
inline real3<T> vcross(const real3<T> a, const real3<T> b) {
  real3<T> c;
  c[0] = a[1] * b[2] - a[2] * b[1];
  c[1] = a[2] * b[0] - a[0] * b[2];
  c[2] = a[0] * b[1] - a[1] * b[0];
  return c;
}

// Dot product a . b.
template <typename T>
inline T vdot(const real3<T> a, const real3<T> b) {
  return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
}

// Component-wise reciprocal that maps (near-)zero components to signed
// infinity instead of producing NaN/Inf by plain division. Used to
// precompute inverse ray directions for slab tests.
template <typename T>
inline real3<T> vsafe_inverse(const real3<T> v) {
  real3<T> r;
#ifdef NANORT_USE_CPP11_FEATURE
  if (std::fabs(v[0]) < std::numeric_limits<T>::epsilon()) {
    r[0] = std::numeric_limits<T>::infinity() *
           std::copysign(static_cast<T>(1), v[0]);
  } else {
    r[0] = static_cast<T>(1.0) / v[0];
  }

  if (std::fabs(v[1]) < std::numeric_limits<T>::epsilon()) {
    r[1] = std::numeric_limits<T>::infinity() *
           std::copysign(static_cast<T>(1), v[1]);
  } else {
    r[1] = static_cast<T>(1.0) / v[1];
  }

  if (std::fabs(v[2]) < std::numeric_limits<T>::epsilon()) {
    r[2] = std::numeric_limits<T>::infinity() *
           std::copysign(static_cast<T>(1), v[2]);
  } else {
    r[2] = static_cast<T>(1.0) / v[2];
  }
#else
  // Pre-C++11 fallback: emulate copysign with a comparison (note: unlike
  // std::copysign this maps negative zero to +1).
  if (std::fabs(v[0]) < std::numeric_limits<T>::epsilon()) {
    T sgn = (v[0] < static_cast<T>(0)) ? static_cast<T>(-1) : static_cast<T>(1);
    r[0] = std::numeric_limits<T>::infinity() * sgn;
  } else {
    r[0] = static_cast<T>(1.0) / v[0];
  }

  if (std::fabs(v[1]) < std::numeric_limits<T>::epsilon()) {
    T sgn = (v[1] < static_cast<T>(0)) ? static_cast<T>(-1) : static_cast<T>(1);
    r[1] = std::numeric_limits<T>::infinity() * sgn;
  } else {
    r[1] = static_cast<T>(1.0) / v[1];
  }

  if (std::fabs(v[2]) < std::numeric_limits<T>::epsilon()) {
    T sgn = (v[2] < static_cast<T>(0)) ? static_cast<T>(-1) : static_cast<T>(1);
    r[2] = std::numeric_limits<T>::infinity() * sgn;
  } else {
    r[2] = static_cast<T>(1.0) / v[2];
  }
#endif

  return r;
}

// Returns the address of the idx-th vertex in a strided vertex array
// (stride expressed in bytes, so non-tightly-packed layouts work).
template <typename real>
inline const real *get_vertex_addr(const real *p, const size_t idx,
                                   const size_t stride_bytes) {
  return reinterpret_cast<const real *>(
      reinterpret_cast<const unsigned char *>(p) + idx * stride_bytes);
}

// Ray description. Callers must fill `org` and `dir` before tracing.
template <typename T = float>
class Ray {
 public:
  Ray()
      : min_t(static_cast<T>(0.0)),
        max_t(std::numeric_limits<T>::max()),
        type(RAY_TYPE_NONE) {
    org[0] = static_cast<T>(0.0);
    org[1] = static_cast<T>(0.0);
    org[2] = static_cast<T>(0.0);
    dir[0] = static_cast<T>(0.0);
    dir[1] = static_cast<T>(0.0);
    dir[2] = static_cast<T>(-1.0);
  }

  T org[3];  // must set
  T dir[3];  // must set
  T min_t;   // minimum ray hit distance.
  T max_t;   // maximum ray hit distance.
  unsigned int type;  // ray type
  // TODO(LTE): Align sizeof(Ray)
};

// One node of the BVH; either a leaf (flag == 1) or a branch (flag == 0).
template <typename T = float>
class BVHNode {
 public:
  BVHNode() {}
  BVHNode(const BVHNode &rhs) {
    bmin[0] = rhs.bmin[0];
    bmin[1] = rhs.bmin[1];
    bmin[2] = rhs.bmin[2];
    flag = rhs.flag;

    bmax[0] = rhs.bmax[0];
    bmax[1] = rhs.bmax[1];
    bmax[2] = rhs.bmax[2];
    axis = rhs.axis;

    data[0] = rhs.data[0];
    data[1] = rhs.data[1];
  }

  BVHNode &operator=(const BVHNode &rhs) {
    bmin[0] = rhs.bmin[0];
    bmin[1] = rhs.bmin[1];
    bmin[2] = rhs.bmin[2];
    flag = rhs.flag;

    bmax[0] = rhs.bmax[0];
    bmax[1] = rhs.bmax[1];
    bmax[2] = rhs.bmax[2];
    axis = rhs.axis;

    data[0] = rhs.data[0];
    data[1] = rhs.data[1];

    return (*this);
  }

  ~BVHNode() {}

  T bmin[3];
  T bmax[3];

  int flag;  // 1 = leaf node, 0 = branch node
  int axis;

  // leaf
  //   data[0] = npoints
  //   data[1] = index
  //
  // branch
  //   data[0] = child[0]
  //   data[1] = child[1]
  unsigned int data[2];
};

// Orders hit records by their distance `t` (nearest first).
template <class H>
class IntersectComparator {
 public:
  bool operator()(const H &a, const H &b) const { return a.t < b.t; }
};

/// BVH build option.
template <typename T = float>
struct BVHBuildOptions {
  T cost_t_aabb;                      // SAH cost weight for an AABB test (Taabb).
  unsigned int min_leaf_primitives;   // Stop splitting below this many prims.
  unsigned int max_tree_depth;
  unsigned int bin_size;              // Number of SAH bins.
  unsigned int shallow_depth;         // Depth of the shallow (parallel) top tree.
  unsigned int min_primitives_for_parallel_build;

  // Cache bounding box computation.
  // Requires more memory, but BVHbuild can be faster.
  bool cache_bbox;
  unsigned char pad[3];

  // Set default value: Taabb = 0.2
  BVHBuildOptions()
      : cost_t_aabb(static_cast<T>(0.2)),
        min_leaf_primitives(4),
        max_tree_depth(256),
        bin_size(64),
        shallow_depth(kNANORT_SHALLOW_DEPTH),
        min_primitives_for_parallel_build(
            kNANORT_MIN_PRIMITIVES_FOR_PARALLEL_BUILD),
        cache_bbox(false) {}
};

/// BVH build statistics.
class BVHBuildStatistics {
 public:
  unsigned int max_tree_depth;
  unsigned int num_leaf_nodes;
  unsigned int num_branch_nodes;
  float build_secs;

  // Set default value: Taabb = 0.2
  BVHBuildStatistics()
      : max_tree_depth(0),
        num_leaf_nodes(0),
        num_branch_nodes(0),
        build_secs(0.0f) {}
};

///
/// @brief BVH trace option.
///
class BVHTraceOptions {
 public:
  // Hit only for face IDs in indexRange.
  // This feature is good to mimic something like glDrawArrays()
  unsigned int prim_ids_range[2];

  // Prim ID to skip for avoiding self-intersection
  // -1 = no skipping
  unsigned int skip_prim_id;

  bool cull_back_face;
  unsigned char pad[3];  ///< Padding (not used)

  BVHTraceOptions() {
    prim_ids_range[0] = 0;
    prim_ids_range[1] = 0x7FFFFFFF;  // Up to 2G face IDs.

    skip_prim_id = static_cast<unsigned int>(-1);
    cull_back_face = false;
  }
};

///
/// @brief Bounding box.
///
template <typename T>
class BBox {
 public:
  real3<T> bmin;
  real3<T> bmax;

  // Initialized inverted (min = +max, max = -max) so that the first
  // point merged into the box sets both bounds.
  BBox() {
    bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<T>::max();
    bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<T>::max();
  }
};

///
/// @brief Hit class for traversing nodes.
///
/// Stores hit information of node traversal.
/// Node traversal is used for two-level ray tracing(efficient ray traversal of a scene hierarchy)
///
template <typename T>
class NodeHit {
 public:
  NodeHit()
      : t_min(std::numeric_limits<T>::max()),
        t_max(-std::numeric_limits<T>::max()),
        node_id(static_cast<unsigned int>(-1)) {}

  NodeHit(const NodeHit<T> &rhs) {
    t_min = rhs.t_min;
    t_max = rhs.t_max;
    node_id = rhs.node_id;
  }

  NodeHit &operator=(const NodeHit<T> &rhs) {
    t_min = rhs.t_min;
    t_max = rhs.t_max;
    node_id = rhs.node_id;

    return (*this);
  }

  ~NodeHit() {}

  T t_min;
  T t_max;
  unsigned int node_id;
};

///
/// @brief Comparator object for NodeHit.
///
/// Comparator object for finding nearest hit point in node traversal.
///
template <typename T>
class NodeHitComparator {
 public:
  inline bool operator()(const NodeHit<T> &a, const NodeHit<T> &b) {
    return a.t_min < b.t_min;
  }
};

///
/// @brief Bounding Volume Hierarchy acceleration.
///
/// BVHAccel is central part of ray tracing(ray traversal).
/// BVHAccel takes an input geometry(primitive) information and build a data structure
/// for efficient ray tracing(`O(log2 N)` in theory, where N is the number of primitive in the scene).
///
/// @tparam T real value type(float or double).
///
template <typename T>
class BVHAccel {
 public:
  BVHAccel() : pad0_(0) { (void)pad0_; }
  ~BVHAccel() {}

  ///
  /// Build BVH for input primitives.
  ///
  /// @tparam Prim Primitive(e.g. Triangle) accessor class.
  /// @tparam Pred Predicator(comparator class object for `Prim` class to find nearest hit point)
  ///
  /// @param[in] num_primitives The number of primitive.
  /// @param[in] p Primitive accessor class object.
  /// @param[in] pred Predicator object.
  ///
  /// @return true upon success.
  ///
  template <class Prim, class Pred>
  bool Build(const unsigned int num_primitives, const Prim &p,
             const Pred &pred,
             const BVHBuildOptions<T> &options = BVHBuildOptions<T>());

  ///
  /// Get statistics of built BVH tree. Valid after `Build()`
  ///
  /// @return BVH build statistics.
  ///
  BVHBuildStatistics GetStatistics() const { return stats_; }

#if defined(NANORT_ENABLE_SERIALIZATION)
  ///
  /// Dump built BVH to the file.
  ///
  bool Dump(const char *filename) const;
  bool Dump(FILE *fp) const;

  ///
  /// Load BVH binary
  ///
  bool Load(const char *filename);
  bool Load(FILE *fp);
#endif

  void Debug();

  ///
  /// @brief Traverse into BVH along ray and find closest hit point & primitive if
  /// found
  ///
  /// @tparam I Intersector class
  /// @tparam H Hit class
  ///
  /// @param[in] ray Input ray
  /// @param[in] intersector Intersector object. This object is called for each possible intersection of ray and BVH during traversal.
  /// @param[out] isect Intersection point information(filled when closest hit point was found)
  /// @param[in] options Traversal options.
  ///
  /// @return true if the closest hit point found.
  ///
  template <class I, class H>
  bool Traverse(const Ray<T> &ray, const I &intersector, H *isect,
                const BVHTraceOptions &options = BVHTraceOptions()) const;

#if 0
  /// Multi-hit ray traversal
  /// Returns `max_intersections` frontmost intersections
  template<class I, class H, class Comp>
  bool MultiHitTraverse(const Ray<T> &ray,
                        int max_intersections,
                        const I &intersector,
                        StackVector<H, 128> *isects,
                        const BVHTraceOptions &options = BVHTraceOptions()) const;
#endif

  ///
  /// List up nodes which intersects along the ray.
  /// This function is useful for two-level BVH traversal.
  /// See `examples/nanosg` for example.
  ///
  /// @tparam I Intersection class
  ///
  ///
  ///
  template <class I>
  bool ListNodeIntersections(const Ray<T> &ray, int max_intersections,
                             const I &intersector,
                             StackVector<NodeHit<T>, 128> *hits) const;

  const std::vector<BVHNode<T> > &GetNodes() const { return nodes_; }
  const std::vector<unsigned int> &GetIndices() const { return indices_; }

  ///
  /// Returns bounding box of built BVH.
  ///
  void BoundingBox(T bmin[3], T bmax[3]) const {
    if (nodes_.empty()) {
      // No tree: return an inverted (empty) box.
      bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<T>::max();
      bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<T>::max();
    } else {
      // Root node bounds the whole scene.
      bmin[0] = nodes_[0].bmin[0];
      bmin[1] = nodes_[0].bmin[1];
      bmin[2] = nodes_[0].bmin[2];
      bmax[0] = nodes_[0].bmax[0];
      bmax[1] = nodes_[0].bmax[1];
      bmax[2] = nodes_[0].bmax[2];
    }
  }

  bool IsValid() const { return nodes_.size() > 0; }

 private:
#if defined(NANORT_ENABLE_PARALLEL_BUILD)
  // Sub-range of primitives handed to one shallow-tree subtree.
  typedef struct {
    unsigned int left_idx;
    unsigned int right_idx;
    unsigned int offset;
  } ShallowNodeInfo;

  // Used only during BVH construction
  std::vector<ShallowNodeInfo> shallow_node_infos_;

  /// Builds shallow BVH tree recursively.
  template <class P, class Pred>
  unsigned int BuildShallowTree(std::vector<BVHNode<T> > *out_nodes,
                                unsigned int left_idx, unsigned int right_idx,
                                unsigned int depth,
                                unsigned int max_shallow_depth, const P &p,
                                const Pred &pred);
#endif

  /// Builds BVH tree recursively.
  template <class P, class Pred>
  unsigned int BuildTree(BVHBuildStatistics *out_stat,
                         std::vector<BVHNode<T> > *out_nodes,
                         unsigned int left_idx, unsigned int right_idx,
                         unsigned int depth, const P &p, const Pred &pred);

  template <class I>
  bool TestLeafNode(const BVHNode<T> &node, const Ray<T> &ray,
                    const I &intersector) const;

  template <class I>
  bool TestLeafNodeIntersections(
      const BVHNode<T> &node, const Ray<T> &ray, const int max_intersections,
      const I &intersector,
      std::priority_queue<NodeHit<T>, std::vector<NodeHit<T> >,
                          NodeHitComparator<T> > *isect_pq) const;

#if 0
  template<class I, class H, class Comp>
  bool MultiHitTestLeafNode(std::priority_queue<H, std::vector<H>, Comp> *isect_pq,
                            int max_intersections,
                            const BVHNode<T> &node, const Ray<T> &ray,
                            const I &intersector) const;
#endif

  std::vector<BVHNode<T> > nodes_;
  std::vector<unsigned int> indices_;  // max 4G triangles.
  std::vector<BBox<T> > bboxes_;
  BVHBuildOptions<T> options_;
  BVHBuildStatistics stats_;
  unsigned int pad0_;
};

// Predefined SAH predicator for triangle.
template <typename T = float>
class TriangleSAHPred {
 public:
  TriangleSAHPred(
      const T *vertices, const unsigned int *faces,
      size_t vertex_stride_bytes)  // e.g. 12 for sizeof(float) * XYZ
      : axis_(0),
        pos_(static_cast<T>(0.0)),
        vertices_(vertices),
        faces_(faces),
        vertex_stride_bytes_(vertex_stride_bytes) {}

  TriangleSAHPred(const TriangleSAHPred<T> &rhs)
      : axis_(rhs.axis_),
        pos_(rhs.pos_),
        vertices_(rhs.vertices_),
        faces_(rhs.faces_),
        vertex_stride_bytes_(rhs.vertex_stride_bytes_) {}

  // NOTE(review): this assigns `vertex_stride_bytes_`, which is declared
  // `const` below — ill-formed if this template member is ever instantiated.
  // Verify against upstream whether the member should be non-const here.
  TriangleSAHPred<T> &operator=(const TriangleSAHPred<T> &rhs) {
    axis_ = rhs.axis_;
    pos_ = rhs.pos_;
    vertices_ = rhs.vertices_;
    faces_ = rhs.faces_;
    vertex_stride_bytes_ = rhs.vertex_stride_bytes_;
    return (*this);
  }

  void Set(int axis, T pos) const {
    axis_ = axis;
    pos_ = pos;
  }

  // Returns true when the triangle's centroid lies below the split plane.
  // Compares the un-normalized vertex sum against pos * 3 to avoid a divide.
  bool operator()(unsigned int i) const {
    int axis = axis_;
    T pos = pos_;

    unsigned int i0 = faces_[3 * i + 0];
    unsigned int i1 = faces_[3 * i + 1];
    unsigned int i2 = faces_[3 * i + 2];

    real3<T> p0(get_vertex_addr<T>(vertices_, i0, vertex_stride_bytes_));
    real3<T> p1(get_vertex_addr<T>(vertices_, i1, vertex_stride_bytes_));
    real3<T> p2(get_vertex_addr<T>(vertices_, i2, vertex_stride_bytes_));

    T center = p0[axis] + p1[axis] + p2[axis];

    return (center < pos * static_cast<T>(3.0));
  }

 private:
  mutable int axis_;
  mutable T pos_;

  const T *vertices_;
  const unsigned int *faces_;
  const size_t vertex_stride_bytes_;
};

// Predefined Triangle mesh geometry.
template <typename T = float>
class TriangleMesh {
 public:
  TriangleMesh(
      const T *vertices, const unsigned int *faces,
      const size_t vertex_stride_bytes)  // e.g. 12 for sizeof(float) * XYZ
      : vertices_(vertices),
        faces_(faces),
        vertex_stride_bytes_(vertex_stride_bytes) {}

  /// Compute bounding box for `prim_index`th triangle.
  /// This function is called for each primitive in BVH build.
  void BoundingBox(real3<T> *bmin, real3<T> *bmax,
                   unsigned int prim_index) const {
    // Seed the box with the first vertex of the triangle.
    unsigned vertex = faces_[3 * prim_index + 0];

    (*bmin)[0] = get_vertex_addr(vertices_, vertex, vertex_stride_bytes_)[0];
    (*bmin)[1] = get_vertex_addr(vertices_, vertex, vertex_stride_bytes_)[1];
    (*bmin)[2] = get_vertex_addr(vertices_, vertex, vertex_stride_bytes_)[2];
    (*bmax)[0] = get_vertex_addr(vertices_, vertex, vertex_stride_bytes_)[0];
    (*bmax)[1] = get_vertex_addr(vertices_, vertex, vertex_stride_bytes_)[1];
    (*bmax)[2] = get_vertex_addr(vertices_, vertex, vertex_stride_bytes_)[2];

    // remaining two vertices of the primitive
    for (unsigned int i = 1; i < 3; i++) {
      // xyz
      for (int k = 0; k < 3; k++) {
        T coord = get_vertex_addr<T>(vertices_, faces_[3 * prim_index + i],
                                     vertex_stride_bytes_)[k];

        (*bmin)[k] = std::min((*bmin)[k], coord);
        (*bmax)[k] = std::max((*bmax)[k], coord);
      }
    }
  }

  const T *vertices_;
  const unsigned int *faces_;
  const size_t vertex_stride_bytes_;

  //
  // Accessors
  //
  const T *GetVertices() const { return vertices_; }
  const unsigned int *GetFaces() const { return faces_; }
  size_t GetVertexStrideBytes() const { return vertex_stride_bytes_; }
};

///
/// Stores intersection point information for triangle geometry.
///
template <typename T = float>
class TriangleIntersection {
 public:
  T u;  // barycentric coordinate
  T v;  // barycentric coordinate

  // Required member variables.
  T t;  // hit distance
  unsigned int prim_id;
};

///
/// Intersector is a template class which implements intersection method and stores
/// intesection point information(`H`)
///
/// @tparam T Precision(float or double)
/// @tparam H Intersection point information struct
///
template <typename T = float, class H = TriangleIntersection<T> >
class TriangleIntersector {
 public:
  // Initialize from mesh object.
  // M: mesh class
  template <class M>
  TriangleIntersector(const M &m)
      : vertices_(m.GetVertices()),
        faces_(m.GetFaces()),
        vertex_stride_bytes_(m.GetVertexStrideBytes()) {}

  template <class M>
  TriangleIntersector(const M *m)
      : vertices_(m->GetVertices()),
        faces_(m->GetFaces()),
        vertex_stride_bytes_(m->GetVertexStrideBytes()) {}

  TriangleIntersector(const T *vertices, const unsigned int *faces,
                      const size_t vertex_stride_bytes)  // e.g.
                                                         // vertex_stride_bytes
                                                         // = 12 = sizeof(float)
                                                         // * 3
      : vertices_(vertices),
        faces_(faces),
        vertex_stride_bytes_(vertex_stride_bytes) {}

  // For Watertight Ray/Triangle Intersection.
  // Shear constants and axis permutation computed once per ray in
  // PrepareTraversal().
  typedef struct {
    T Sx;
    T Sy;
    T Sz;
    int kx;
    int ky;
    int kz;
  } RayCoeff;

  /// Do ray intersection stuff for `prim_index` th primitive and return hit
  /// distance `t`, barycentric coordinate `u` and `v`.
  /// Returns true if there's intersection.
  bool Intersect(T *t_inout, const unsigned int prim_index) const {
    if ((prim_index < trace_options_.prim_ids_range[0]) ||
        (prim_index >= trace_options_.prim_ids_range[1])) {
      return false;
    }

    // Self-intersection test.
    if (prim_index == trace_options_.skip_prim_id) {
      return false;
    }

    const unsigned int f0 = faces_[3 * prim_index + 0];
    const unsigned int f1 = faces_[3 * prim_index + 1];
    const unsigned int f2 = faces_[3 * prim_index + 2];

    const real3<T> p0(get_vertex_addr(vertices_, f0 + 0, vertex_stride_bytes_));
    const real3<T> p1(get_vertex_addr(vertices_, f1 + 0, vertex_stride_bytes_));
    const real3<T> p2(get_vertex_addr(vertices_, f2 + 0, vertex_stride_bytes_));

    // Translate vertices to ray origin, then apply the shear/permutation
    // from ray_coeff_ (watertight intersection scheme).
    const real3<T> A = p0 - ray_org_;
    const real3<T> B = p1 - ray_org_;
    const real3<T> C = p2 - ray_org_;

    const T Ax = A[ray_coeff_.kx] - ray_coeff_.Sx * A[ray_coeff_.kz];
    const T Ay = A[ray_coeff_.ky] - ray_coeff_.Sy * A[ray_coeff_.kz];
    const T Bx = B[ray_coeff_.kx] - ray_coeff_.Sx * B[ray_coeff_.kz];
    const T By = B[ray_coeff_.ky] - ray_coeff_.Sy * B[ray_coeff_.kz];
    const T Cx = C[ray_coeff_.kx] - ray_coeff_.Sx * C[ray_coeff_.kz];
    const T Cy = C[ray_coeff_.ky] - ray_coeff_.Sy * C[ray_coeff_.kz];

    // Scaled barycentric coordinates (edge functions).
    T U = Cx * By - Cy * Bx;
    T V = Ax * Cy - Ay * Cx;
    T W = Bx * Ay - By * Ax;

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wfloat-equal"
#endif

    // Fall back to test against edges using double precision.
    if (U == static_cast<T>(0.0) || V == static_cast<T>(0.0) ||
        W == static_cast<T>(0.0)) {
      double CxBy = static_cast<double>(Cx) * static_cast<double>(By);
      double CyBx = static_cast<double>(Cy) * static_cast<double>(Bx);
      U = static_cast<T>(CxBy - CyBx);

      double AxCy = static_cast<double>(Ax) * static_cast<double>(Cy);
      double AyCx = static_cast<double>(Ay) * static_cast<double>(Cx);
      V = static_cast<T>(AxCy - AyCx);

      double BxAy = static_cast<double>(Bx) * static_cast<double>(Ay);
      double ByAx = static_cast<double>(By) * static_cast<double>(Ax);
      W = static_cast<T>(BxAy - ByAx);
    }

    // Mixed signs => ray misses the triangle (unless back-face hit allowed).
    if (U < static_cast<T>(0.0) || V < static_cast<T>(0.0) ||
        W < static_cast<T>(0.0)) {
      if (trace_options_.cull_back_face ||
          (U > static_cast<T>(0.0) || V > static_cast<T>(0.0) ||
           W > static_cast<T>(0.0))) {
        return false;
      }
    }

    T det = U + V + W;
    if (det == static_cast<T>(0.0)) return false;

#ifdef __clang__
#pragma clang diagnostic pop
#endif

    const T Az = ray_coeff_.Sz * A[ray_coeff_.kz];
    const T Bz = ray_coeff_.Sz * B[ray_coeff_.kz];
    const T Cz = ray_coeff_.Sz * C[ray_coeff_.kz];

    const T D = U * Az + V * Bz + W * Cz;

    const T rcpDet = static_cast<T>(1.0) / det;
    T tt = D * rcpDet;

    if (tt > (*t_inout)) {
      return false;
    }

    if (tt < t_min_) {
      return false;
    }

    (*t_inout) = tt;
    // Use Möller-Trumbore style barycentric coordinates
    // U + V + W = 1.0 and interp(p) = U * p0 + V * p1 + W * p2
    // We want interp(p) = (1 - u - v) * p0 + u * v1 + v * p2;
    // => u = V, v = W.
    u_ = V * rcpDet;
    v_ = W * rcpDet;

    return true;
  }

  /// Returns the nearest hit distance.
  T GetT() const { return t_; }

  /// Update is called when initializing intersection and nearest hit is found.
  void Update(T t, unsigned int prim_idx) const {
    t_ = t;
    prim_id_ = prim_idx;
  }

  /// Prepare BVH traversal (e.g. compute inverse ray direction)
  /// This function is called only once in BVH traversal.
  void PrepareTraversal(const Ray<T> &ray,
                        const BVHTraceOptions &trace_options) const {
    ray_org_[0] = ray.org[0];
    ray_org_[1] = ray.org[1];
    ray_org_[2] = ray.org[2];

    // Calculate dimension where the ray direction is maximal.
    ray_coeff_.kz = 0;
    T absDir = std::fabs(ray.dir[0]);
    if (absDir < std::fabs(ray.dir[1])) {
      ray_coeff_.kz = 1;
      absDir = std::fabs(ray.dir[1]);
    }
    if (absDir < std::fabs(ray.dir[2])) {
      ray_coeff_.kz = 2;
      absDir = std::fabs(ray.dir[2]);
    }

    ray_coeff_.kx = ray_coeff_.kz + 1;
    if (ray_coeff_.kx == 3) ray_coeff_.kx = 0;
    ray_coeff_.ky = ray_coeff_.kx + 1;
    if (ray_coeff_.ky == 3) ray_coeff_.ky = 0;

    // Swap kx and ky dimension to preserve winding direction of triangles.
    if (ray.dir[ray_coeff_.kz] < static_cast<T>(0.0))
      std::swap(ray_coeff_.kx, ray_coeff_.ky);

    // Calculate shear constants.
    ray_coeff_.Sx = ray.dir[ray_coeff_.kx] / ray.dir[ray_coeff_.kz];
    ray_coeff_.Sy = ray.dir[ray_coeff_.ky] / ray.dir[ray_coeff_.kz];
    ray_coeff_.Sz = static_cast<T>(1.0) / ray.dir[ray_coeff_.kz];

    trace_options_ = trace_options;

    t_min_ = ray.min_t;

    u_ = static_cast<T>(0.0);
    v_ = static_cast<T>(0.0);
  }

  /// Post BVH traversal stuff.
  /// Fill `isect` if there is a hit.
  void PostTraversal(const Ray<T> &ray, bool hit, H *isect) const {
    if (hit && isect) {
      (*isect).t = t_;
      (*isect).u = u_;
      (*isect).v = v_;
      (*isect).prim_id = prim_id_;
    }
    (void)ray;
  }

 private:
  const T *vertices_;
  const unsigned int *faces_;
  const size_t vertex_stride_bytes_;

  mutable real3<T> ray_org_;
  mutable RayCoeff ray_coeff_;
  mutable BVHTraceOptions trace_options_;
  mutable T t_min_;

  mutable T t_;
  mutable T u_;
  mutable T v_;
  mutable unsigned int prim_id_;
};

//
// Robust BVH Ray Traversal : http://jcgt.org/published/0002/02/02/paper.pdf
//

// NaN-safe min and max function.
template <class T>
const T &safemin(const T &a, const T &b) {
  return (a < b) ? a : b;
}
template <class T>
const T &safemax(const T &a, const T &b) {
  return (a > b) ?
a : b;
}

//
// SAH functions
//

// Histogram of primitive bbox extremes: (min, max) x (x, y, z) x bin_size.
struct BinBuffer {
  explicit BinBuffer(unsigned int size) {
    bin_size = size;
    bin.resize(2 * 3 * size);
    clear();
  }

  void clear() { memset(&bin[0], 0, sizeof(size_t) * 2 * 3 * bin_size); }

  std::vector<size_t> bin;  // (min, max) * xyz * binsize
  unsigned int bin_size;
  unsigned int pad0;
};

// Surface area of the AABB [min, max] (the SAH cost metric).
template <typename T>
inline T CalculateSurfaceArea(const real3<T> &min, const real3<T> &max) {
  real3<T> box = max - min;
  return static_cast<T>(2.0) *
         (box[0] * box[1] + box[1] * box[2] + box[2] * box[0]);
}

// AABB of one indexed triangle. Assumes tightly packed xyz vertices
// (stride 3 * sizeof(T)) — TODO confirm against callers.
template <typename T>
inline void GetBoundingBoxOfTriangle(real3<T> *bmin, real3<T> *bmax,
                                     const T *vertices,
                                     const unsigned int *faces,
                                     unsigned int index) {
  unsigned int f0 = faces[3 * index + 0];
  unsigned int f1 = faces[3 * index + 1];
  unsigned int f2 = faces[3 * index + 2];

  real3<T> p[3];

  p[0] = real3<T>(&vertices[3 * f0]);
  p[1] = real3<T>(&vertices[3 * f1]);
  p[2] = real3<T>(&vertices[3 * f2]);

  (*bmin) = p[0];
  (*bmax) = p[0];

  for (int i = 1; i < 3; i++) {
    (*bmin)[0] = std::min((*bmin)[0], p[i][0]);
    (*bmin)[1] = std::min((*bmin)[1], p[i][1]);
    (*bmin)[2] = std::min((*bmin)[2], p[i][2]);

    (*bmax)[0] = std::max((*bmax)[0], p[i][0]);
    (*bmax)[1] = std::max((*bmax)[1], p[i][1]);
    (*bmax)[2] = std::max((*bmax)[2], p[i][2]);
  }
}

// Fill `bins` with, per axis, how many primitive bboxes start (min side)
// and end (max side) in each bin, over indices[left_idx, right_idx).
template <typename T, class P>
inline void ContributeBinBuffer(BinBuffer *bins,  // [out]
                                const real3<T> &scene_min,
                                const real3<T> &scene_max,
                                unsigned int *indices, unsigned int left_idx,
                                unsigned int right_idx, const P &p) {
  // NOTE(review): this local `bin_size` (type T, floating point) shadows
  // bins->bin_size; the size_t-vs-T comparisons below rely on implicit
  // conversion and work for the small bin counts used here.
  T bin_size = static_cast<T>(bins->bin_size);

  // Calculate extent
  real3<T> scene_size, scene_inv_size;
  scene_size = scene_max - scene_min;

  for (int i = 0; i < 3; ++i) {
    assert(scene_size[i] >= static_cast<T>(0.0));

    // Flat axes get inv_size 0 so every primitive lands in bin 0.
    if (scene_size[i] > static_cast<T>(0.0)) {
      scene_inv_size[i] = bin_size / scene_size[i];
    } else {
      scene_inv_size[i] = static_cast<T>(0.0);
    }
  }

  // Clear bin data
  std::fill(bins->bin.begin(), bins->bin.end(), 0);
  // memset(&bins->bin[0], 0, sizeof(2 * 3 * bins->bin_size));

  size_t idx_bmin[3];
  size_t idx_bmax[3];

  for (size_t i = left_idx; i < right_idx; i++) {
    //
    // Quantize the position into [0, BIN_SIZE)
    //
    // q[i] = (int)(p[i] - scene_bmin) / scene_size
    //
    real3<T> bmin;
    real3<T> bmax;

    p.BoundingBox(&bmin, &bmax, indices[i]);
    // GetBoundingBoxOfTriangle(&bmin, &bmax, vertices, faces, indices[i]);

    real3<T> quantized_bmin = (bmin - scene_min) * scene_inv_size;
    real3<T> quantized_bmax = (bmax - scene_min) * scene_inv_size;

    // idx is now in [0, BIN_SIZE)
    for (int j = 0; j < 3; ++j) {
      // Clamp to [0, bin_size - 1] before indexing.
      int q0 = static_cast<int>(quantized_bmin[j]);
      if (q0 < 0) q0 = 0;
      int q1 = static_cast<int>(quantized_bmax[j]);
      if (q1 < 0) q1 = 0;

      idx_bmin[j] = static_cast<unsigned int>(q0);
      idx_bmax[j] = static_cast<unsigned int>(q1);

      if (idx_bmin[j] >= bin_size)
        idx_bmin[j] = static_cast<unsigned int>(bin_size) - 1;

      if (idx_bmax[j] >= bin_size)
        idx_bmax[j] = static_cast<unsigned int>(bin_size) - 1;

      // Increment bin counter
      bins->bin[0 * (bins->bin_size * 3) +
                static_cast<size_t>(j) * bins->bin_size + idx_bmin[j]] += 1;
      bins->bin[1 * (bins->bin_size * 3) +
                static_cast<size_t>(j) * bins->bin_size + idx_bmax[j]] += 1;
    }
  }
}

// Surface Area Heuristic cost of splitting ns1/ns2 primitives with the
// given child surface areas; invS = 1 / parent surface area.
template <typename T>
inline T SAH(size_t ns1, T leftArea, size_t ns2, T rightArea, T invS, T Taabb,
             T Ttri) {
  T sah;

  sah = static_cast<T>(2.0) * Taabb +
        (leftArea * invS) * static_cast<T>(ns1) * Ttri +
        (rightArea * invS) * static_cast<T>(ns2) * Ttri;

  return sah;
}

// Scan the bins to find, per axis, the cut plane with minimum SAH cost,
// then pick the cheapest axis. Writes cut_pos[3] and *minCostAxis.
template <typename T>
inline bool FindCutFromBinBuffer(T *cut_pos,        // [out] xyz
                                 int *minCostAxis,  // [out]
                                 const BinBuffer *bins, const real3<T> &bmin,
                                 const real3<T> &bmax, size_t num_primitives,
                                 T costTaabb) {  // should be in [0.0, 1.0]
  const T kEPS = std::numeric_limits<T>::epsilon();  // * epsScale;

  size_t left, right;
  real3<T> bsize, bstep;
  real3<T> bminLeft, bmaxLeft;
  real3<T> bminRight, bmaxRight;
  T saLeft, saRight, saTotal;
  T pos;
  T minCost[3];

  T costTtri = static_cast<T>(1.0) - costTaabb;

  (*minCostAxis) = 0;

  bsize = bmax - bmin;
  bstep = bsize * (static_cast<T>(1.0) / bins->bin_size);
  saTotal =
CalculateSurfaceArea(bmin, bmax); T invSaTotal = static_cast<T>(0.0); if (saTotal > kEPS) { invSaTotal = static_cast<T>(1.0) / saTotal; } for (int j = 0; j < 3; ++j) { // // Compute SAH cost for the right side of each cell of the bbox. // Exclude both extreme sides of the bbox. // // i: 0 1 2 3 // +----+----+----+----+----+ // | | | | | | // +----+----+----+----+----+ // T minCostPos = bmin[j] + static_cast<T>(1.0) * bstep[j]; minCost[j] = std::numeric_limits<T>::max(); left = 0; right = num_primitives; bminLeft = bminRight = bmin; bmaxLeft = bmaxRight = bmax; for (int i = 0; i < static_cast<int>(bins->bin_size) - 1; ++i) { left += bins->bin[0 * (3 * bins->bin_size) + static_cast<size_t>(j) * bins->bin_size + static_cast<size_t>(i)]; right -= bins->bin[1 * (3 * bins->bin_size) + static_cast<size_t>(j) * bins->bin_size + static_cast<size_t>(i)]; assert(left <= num_primitives); assert(right <= num_primitives); // // Split pos bmin + (i + 1) * (bsize / BIN_SIZE) // +1 for i since we want a position on right side of the cell. 
// pos = bmin[j] + (i + static_cast<T>(1.0)) * bstep[j]; bmaxLeft[j] = pos; bminRight[j] = pos; saLeft = CalculateSurfaceArea(bminLeft, bmaxLeft); saRight = CalculateSurfaceArea(bminRight, bmaxRight); T cost = SAH(left, saLeft, right, saRight, invSaTotal, costTaabb, costTtri); if (cost < minCost[j]) { // // Update the min cost // minCost[j] = cost; minCostPos = pos; // minCostAxis = j; } } cut_pos[j] = minCostPos; } // cut_axis = minCostAxis; // cut_pos = minCostPos; // Find min cost axis T cost = minCost[0]; (*minCostAxis) = 0; if (cost > minCost[1]) { (*minCostAxis) = 1; cost = minCost[1]; } if (cost > minCost[2]) { (*minCostAxis) = 2; cost = minCost[2]; } return true; } #ifdef _OPENMP template <typename T, class P> void ComputeBoundingBoxOMP(real3<T> *bmin, real3<T> *bmax, const unsigned int *indices, unsigned int left_index, unsigned int right_index, const P &p) { { p.BoundingBox(bmin, bmax, indices[left_index]); } T local_bmin[3] = {(*bmin)[0], (*bmin)[1], (*bmin)[2]}; T local_bmax[3] = {(*bmax)[0], (*bmax)[1], (*bmax)[2]}; unsigned int n = right_index - left_index; #pragma omp parallel firstprivate(local_bmin, local_bmax) if (n > (1024 * 128)) { #pragma omp parallel for // for each face for (int i = int(left_index); i < int(right_index); i++) { unsigned int idx = indices[i]; real3<T> bbox_min, bbox_max; p.BoundingBox(&bbox_min, &bbox_max, idx); // xyz for (int k = 0; k < 3; k++) { (*bmin)[k] = std::min((*bmin)[k], bbox_min[k]); (*bmax)[k] = std::max((*bmax)[k], bbox_max[k]); } } #pragma omp critical { for (int k = 0; k < 3; k++) { (*bmin)[k] = std::min((*bmin)[k], local_bmin[k]); (*bmax)[k] = std::max((*bmax)[k], local_bmax[k]); } } } } #endif #ifdef NANORT_USE_CPP11_FEATURE template <typename T, class P> inline void ComputeBoundingBoxThreaded(real3<T> *bmin, real3<T> *bmax, const unsigned int *indices, unsigned int left_index, unsigned int right_index, const P &p) { unsigned int n = right_index - left_index; size_t num_threads = std::min( 
size_t(kNANORT_MAX_THREADS),
      std::max(size_t(1), size_t(std::thread::hardware_concurrency())));

  // Never spawn more threads than there are primitives.
  if (n < num_threads) {
    num_threads = n;
  }

  std::vector<std::thread> workers;
  size_t ndiv = n / num_threads;

  // Per-thread partial bounds; slot t owns [3*t, 3*t+2].
  std::vector<T> local_bmins(3 * num_threads);  // 3 = xyz
  std::vector<T> local_bmaxs(3 * num_threads);  // 3 = xyz

  for (size_t t = 0; t < num_threads; t++) {
    workers.emplace_back(std::thread([&, t]() {
      // Contiguous chunk; the last thread absorbs the remainder.
      size_t si = left_index + t * ndiv;
      size_t ei = (t == (num_threads - 1))
                      ? size_t(right_index)
                      : std::min(left_index + (t + 1) * ndiv,
                                 size_t(right_index));

      // Identity elements for min/max folding.
      local_bmins[3 * t + 0] = std::numeric_limits<T>::infinity();
      local_bmins[3 * t + 1] = std::numeric_limits<T>::infinity();
      local_bmins[3 * t + 2] = std::numeric_limits<T>::infinity();
      local_bmaxs[3 * t + 0] = -std::numeric_limits<T>::infinity();
      local_bmaxs[3 * t + 1] = -std::numeric_limits<T>::infinity();
      local_bmaxs[3 * t + 2] = -std::numeric_limits<T>::infinity();

      // for each face
      for (size_t i = si; i < ei; i++) {
        unsigned int idx = indices[i];

        real3<T> bbox_min, bbox_max;
        p.BoundingBox(&bbox_min, &bbox_max, idx);

        // xyz
        for (size_t k = 0; k < 3; k++) {
          local_bmins[3 * t + k] =
              std::min(local_bmins[3 * t + k], bbox_min[int(k)]);
          local_bmaxs[3 * t + k] =
              std::max(local_bmaxs[3 * t + k], bbox_max[int(k)]);
        }
      }
    }));
  }

  for (auto &t : workers) {
    t.join();
  }

  // merge bbox: start from thread 0's result, fold in the rest.
  for (size_t k = 0; k < 3; k++) {
    (*bmin)[int(k)] = local_bmins[k];
    (*bmax)[int(k)] = local_bmaxs[k];
  }

  for (size_t t = 1; t < num_threads; t++) {
    for (size_t k = 0; k < 3; k++) {
      (*bmin)[int(k)] = std::min((*bmin)[int(k)], local_bmins[3 * t + k]);
      (*bmax)[int(k)] = std::max((*bmax)[int(k)], local_bmaxs[3 * t + k]);
    }
  }
}
#endif

// Serial bbox of primitives indices[left_index, right_index).
// Seeds from the first primitive, then folds the remainder.
template <typename T, class P>
inline void ComputeBoundingBox(real3<T> *bmin, real3<T> *bmax,
                               const unsigned int *indices,
                               unsigned int left_index,
                               unsigned int right_index, const P &p) {
  unsigned int idx = indices[left_index];
  p.BoundingBox(bmin, bmax, idx);

  {
    // for each primitive
    for (unsigned int i = left_index + 1; i < right_index; i++) {
      idx = indices[i];
      real3<T> bbox_min, bbox_max;
      p.BoundingBox(&bbox_min, &bbox_max, idx);

      // xyz
      for (int k = 0; k < 3; k++) {
        (*bmin)[k] = std::min((*bmin)[k], bbox_min[k]);
        (*bmax)[k] = std::max((*bmax)[k], bbox_max[k]);
      }
    }
  }
}

// Same reduction but over precomputed (cached) per-primitive bboxes.
template <typename T>
inline void GetBoundingBox(real3<T> *bmin, real3<T> *bmax,
                           const std::vector<BBox<T> > &bboxes,
                           unsigned int *indices, unsigned int left_index,
                           unsigned int right_index) {
  unsigned int i = left_index;
  unsigned int idx = indices[i];

  (*bmin)[0] = bboxes[idx].bmin[0];
  (*bmin)[1] = bboxes[idx].bmin[1];
  (*bmin)[2] = bboxes[idx].bmin[2];
  (*bmax)[0] = bboxes[idx].bmax[0];
  (*bmax)[1] = bboxes[idx].bmax[1];
  (*bmax)[2] = bboxes[idx].bmax[2];

  // for each face
  for (i = left_index + 1; i < right_index; i++) {
    idx = indices[i];

    // xyz
    for (int k = 0; k < 3; k++) {
      (*bmin)[k] = std::min((*bmin)[k], bboxes[idx].bmin[k]);
      (*bmax)[k] = std::max((*bmax)[k], bboxes[idx].bmax[k]);
    }
  }
}

//
// --
//

#if defined(NANORT_ENABLE_PARALLEL_BUILD)
// Build only the top `max_shallow_depth` levels of the BVH; deeper subtrees
// are recorded in shallow_node_infos_ (with placeholder nodes) and built
// in parallel later by Build(). Returns the node's index in out_nodes.
template <typename T>
template <class P, class Pred>
unsigned int BVHAccel<T>::BuildShallowTree(std::vector<BVHNode<T> > *out_nodes,
                                           unsigned int left_idx,
                                           unsigned int right_idx,
                                           unsigned int depth,
                                           unsigned int max_shallow_depth,
                                           const P &p, const Pred &pred) {
  assert(left_idx <= right_idx);

  unsigned int offset = static_cast<unsigned int>(out_nodes->size());

  if (stats_.max_tree_depth < depth) {
    stats_.max_tree_depth = depth;
  }

  real3<T> bmin, bmax;
#if defined(NANORT_USE_CPP11_FEATURE) && defined(NANORT_ENABLE_PARALLEL_BUILD)
  ComputeBoundingBoxThreaded(&bmin, &bmax, &indices_.at(0), left_idx,
                             right_idx, p);
#else
  ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), left_idx, right_idx, p);
#endif

  unsigned int n = right_idx - left_idx;
  if ((n <= options_.min_leaf_primitives) ||
      (depth >= options_.max_tree_depth)) {
    // Create leaf node.
BVHNode<T> leaf;

    leaf.bmin[0] = bmin[0];
    leaf.bmin[1] = bmin[1];
    leaf.bmin[2] = bmin[2];

    leaf.bmax[0] = bmax[0];
    leaf.bmax[1] = bmax[1];
    leaf.bmax[2] = bmax[2];

    assert(left_idx < std::numeric_limits<unsigned int>::max());

    // Leaf payload: data[0] = primitive count, data[1] = first index into
    // indices_.
    leaf.flag = 1;  // leaf
    leaf.data[0] = n;
    leaf.data[1] = left_idx;

    out_nodes->push_back(leaf);  // atomic update

    stats_.num_leaf_nodes++;

    return offset;
  }

  //
  // Create branch node.
  //
  if (depth >= max_shallow_depth) {
    // Delay to build tree: record the range and leave a placeholder node
    // (flag == -1) that Build() replaces after the parallel phase.
    ShallowNodeInfo info;
    info.left_idx = left_idx;
    info.right_idx = right_idx;
    info.offset = offset;
    shallow_node_infos_.push_back(info);

    // Add dummy node.
    BVHNode<T> node;
    node.axis = -1;
    node.flag = -1;
    out_nodes->push_back(node);

    return offset;

  } else {
    //
    // TODO(LTE): multi-threaded SAH computation, or use simple object median or
    // spacial median for shallow tree to speeding up the parallel build.
    //

    //
    // Compute SAH and find best split axis and position
    //
    int min_cut_axis = 0;
    T cut_pos[3] = {0.0, 0.0, 0.0};

    BinBuffer bins(options_.bin_size);
    ContributeBinBuffer(&bins, bmin, bmax, &indices_.at(0), left_idx,
                        right_idx, p);
    FindCutFromBinBuffer(cut_pos, &min_cut_axis, &bins, bmin, bmax, n,
                         options_.cost_t_aabb);

    // Try all 3 axis until good cut position avaiable.
    unsigned int mid_idx = left_idx;
    int cut_axis = min_cut_axis;

    for (int axis_try = 0; axis_try < 3; axis_try++) {
      unsigned int *begin = &indices_[left_idx];
      unsigned int *end = &indices_[right_idx - 1] + 1;  // mimics end() iterator
      unsigned int *mid = 0;

      // try min_cut_axis first.
      cut_axis = (min_cut_axis + axis_try) % 3;

      pred.Set(cut_axis, cut_pos[cut_axis]);

      //
      // Split at (cut_axis, cut_pos)
      // indices_ will be modified.
      //
      mid = std::partition(begin, end, pred);

      mid_idx = left_idx + static_cast<unsigned int>((mid - begin));

      if ((mid_idx == left_idx) || (mid_idx == right_idx)) {
        // Can't split well.
        // Switch to object median (which may create unoptimized tree, but
        // stable)
        mid_idx = left_idx + (n >> 1);

        // Try another axis if there's an axis to try.

      } else {
        // Found good cut. exit loop.
        break;
      }
    }

    BVHNode<T> node;
    node.axis = cut_axis;
    node.flag = 0;  // 0 = branch

    out_nodes->push_back(node);

    unsigned int left_child_index = 0;
    unsigned int right_child_index = 0;

    // Recurse; children fill in their own subtrees, then we patch this
    // node's child indices and bounds below.
    left_child_index = BuildShallowTree(out_nodes, left_idx, mid_idx,
                                        depth + 1, max_shallow_depth, p, pred);

    right_child_index = BuildShallowTree(out_nodes, mid_idx, right_idx,
                                         depth + 1, max_shallow_depth, p, pred);

    //std::cout << "shallow[" << offset << "] l and r = " << left_child_index << ", " << right_child_index << std::endl;
    (*out_nodes)[offset].data[0] = left_child_index;
    (*out_nodes)[offset].data[1] = right_child_index;

    (*out_nodes)[offset].bmin[0] = bmin[0];
    (*out_nodes)[offset].bmin[1] = bmin[1];
    (*out_nodes)[offset].bmin[2] = bmin[2];

    (*out_nodes)[offset].bmax[0] = bmax[0];
    (*out_nodes)[offset].bmax[1] = bmax[1];
    (*out_nodes)[offset].bmax[2] = bmax[2];
  }

  stats_.num_branch_nodes++;

  return offset;
}
#endif

// Recursive serial SAH build over indices_[left_idx, right_idx).
// Appends nodes to out_nodes and returns this subtree root's index.
template <typename T>
template <class P, class Pred>
unsigned int BVHAccel<T>::BuildTree(BVHBuildStatistics *out_stat,
                                    std::vector<BVHNode<T> > *out_nodes,
                                    unsigned int left_idx,
                                    unsigned int right_idx, unsigned int depth,
                                    const P &p, const Pred &pred) {
  assert(left_idx <= right_idx);

  unsigned int offset = static_cast<unsigned int>(out_nodes->size());

  if (out_stat->max_tree_depth < depth) {
    out_stat->max_tree_depth = depth;
  }

  real3<T> bmin, bmax;
  // Prefer cached per-primitive bboxes when Build() populated them.
  if (!bboxes_.empty()) {
    GetBoundingBox(&bmin, &bmax, bboxes_, &indices_.at(0), left_idx,
                   right_idx);
  } else {
    ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), left_idx, right_idx, p);
  }

  unsigned int n = right_idx - left_idx;
  if ((n <= options_.min_leaf_primitives) ||
      (depth >= options_.max_tree_depth)) {
    // Create leaf node.
BVHNode<T> leaf;

    leaf.bmin[0] = bmin[0];
    leaf.bmin[1] = bmin[1];
    leaf.bmin[2] = bmin[2];

    leaf.bmax[0] = bmax[0];
    leaf.bmax[1] = bmax[1];
    leaf.bmax[2] = bmax[2];

    assert(left_idx < std::numeric_limits<unsigned int>::max());

    // Leaf payload: data[0] = primitive count, data[1] = first index into
    // indices_.
    leaf.flag = 1;  // leaf
    leaf.data[0] = n;
    leaf.data[1] = left_idx;

    out_nodes->push_back(leaf);  // atomic update

    out_stat->num_leaf_nodes++;

    return offset;
  }

  //
  // Create branch node.
  //

  //
  // Compute SAH and find best split axis and position
  //
  int min_cut_axis = 0;
  T cut_pos[3] = {0.0, 0.0, 0.0};

  BinBuffer bins(options_.bin_size);
  ContributeBinBuffer(&bins, bmin, bmax, &indices_.at(0), left_idx, right_idx,
                      p);
  FindCutFromBinBuffer(cut_pos, &min_cut_axis, &bins, bmin, bmax, n,
                       options_.cost_t_aabb);

  // Try all 3 axis until good cut position avaiable.
  unsigned int mid_idx = left_idx;
  int cut_axis = min_cut_axis;

  for (int axis_try = 0; axis_try < 3; axis_try++) {
    unsigned int *begin = &indices_[left_idx];
    unsigned int *end = &indices_[right_idx - 1] + 1;  // mimics end() iterator.
    unsigned int *mid = 0;

    // try min_cut_axis first.
    cut_axis = (min_cut_axis + axis_try) % 3;

    pred.Set(cut_axis, cut_pos[cut_axis]);

    //
    // Split at (cut_axis, cut_pos)
    // indices_ will be modified.
    //
    mid = std::partition(begin, end, pred);

    mid_idx = left_idx + static_cast<unsigned int>((mid - begin));

    if ((mid_idx == left_idx) || (mid_idx == right_idx)) {
      // Can't split well.
      // Switch to object median(which may create unoptimized tree, but
      // stable)
      mid_idx = left_idx + (n >> 1);

      // Try another axis to find better cut.

    } else {
      // Found good cut. exit loop.
      break;
    }
  }

  BVHNode<T> node;
  node.axis = cut_axis;
  node.flag = 0;  // 0 = branch

  out_nodes->push_back(node);

  unsigned int left_child_index = 0;
  unsigned int right_child_index = 0;

  left_child_index = BuildTree(out_stat, out_nodes, left_idx, mid_idx,
                               depth + 1, p, pred);

  right_child_index = BuildTree(out_stat, out_nodes, mid_idx, right_idx,
                                depth + 1, p, pred);

  // Patch this branch node now that both children (and their node indices)
  // exist.
  {
    (*out_nodes)[offset].data[0] = left_child_index;
    (*out_nodes)[offset].data[1] = right_child_index;

    (*out_nodes)[offset].bmin[0] = bmin[0];
    (*out_nodes)[offset].bmin[1] = bmin[1];
    (*out_nodes)[offset].bmin[2] = bmin[2];

    (*out_nodes)[offset].bmax[0] = bmax[0];
    (*out_nodes)[offset].bmax[1] = bmax[1];
    (*out_nodes)[offset].bmax[2] = bmax[2];
  }

  out_stat->num_branch_nodes++;

  return offset;
}

// Top-level build entry point: initializes indices_, optionally caches
// per-primitive bboxes, then builds the tree (parallel when enabled and the
// dataset is large enough). Returns false for zero primitives.
template <typename T>
template <class Prim, class Pred>
bool BVHAccel<T>::Build(unsigned int num_primitives, const Prim &p,
                        const Pred &pred, const BVHBuildOptions<T> &options) {
  options_ = options;
  stats_ = BVHBuildStatistics();

  nodes_.clear();
  bboxes_.clear();
#if defined(NANORT_ENABLE_PARALLEL_BUILD)
  shallow_node_infos_.clear();
#endif

  assert(options_.bin_size > 1);

  if (num_primitives == 0) {
    return false;
  }

  unsigned int n = num_primitives;

  //
  // 1. Create triangle indices(this will be permutated in BuildTree)
  //
  indices_.resize(n);

#if defined(NANORT_USE_CPP11_FEATURE)
  {
    // Initialize indices_ with the identity permutation, in parallel chunks.
    size_t num_threads = std::min(
        size_t(kNANORT_MAX_THREADS),
        std::max(size_t(1), size_t(std::thread::hardware_concurrency())));

    if (n < num_threads) {
      num_threads = n;
    }

    std::vector<std::thread> workers;

    size_t ndiv = n / num_threads;

    for (size_t t = 0; t < num_threads; t++) {
      workers.emplace_back(std::thread([&, t]() {
        size_t si = t * ndiv;
        size_t ei = (t == (num_threads - 1)) ?
n : std::min((t + 1) * ndiv, size_t(n));

        for (size_t k = si; k < ei; k++) {
          indices_[k] = static_cast<unsigned int>(k);
        }
      }));
    }

    for (auto &t : workers) {
      t.join();
    }
  }
#else

#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < static_cast<int>(n); i++) {
    indices_[static_cast<size_t>(i)] = static_cast<unsigned int>(i);
  }
#endif  // !NANORT_USE_CPP11_FEATURE

  //
  // 2. Compute bounding box (optional).
  //
  real3<T> bmin, bmax;

  if (options.cache_bbox) {
    bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<T>::max();
    bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<T>::max();

    bboxes_.resize(n);

    for (size_t i = 0; i < n; i++) {  // for each primitive
      unsigned int idx = indices_[i];

      BBox<T> bbox;
      // NOTE(review): queries primitive `i` but stores at `idx`; correct
      // only because indices_ is still the identity permutation here.
      p.BoundingBox(&(bbox.bmin), &(bbox.bmax), static_cast<unsigned int>(i));
      bboxes_[idx] = bbox;

      // xyz
      for (int k = 0; k < 3; k++) {
        bmin[k] = std::min(bmin[k], bbox.bmin[k]);
        bmax[k] = std::max(bmax[k], bbox.bmax[k]);
      }
    }

  } else {
#if defined(NANORT_USE_CPP11_FEATURE)
    ComputeBoundingBoxThreaded(&bmin, &bmax, &indices_.at(0), 0, n, p);
#elif defined(_OPENMP)
    ComputeBoundingBoxOMP(&bmin, &bmax, &indices_.at(0), 0, n, p);
#else
    ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), 0, n, p);
#endif
  }

  //
  // 3. Build tree
  //
#if defined(NANORT_ENABLE_PARALLEL_BUILD)
#if defined(NANORT_USE_CPP11_FEATURE)

  // Do parallel build for large enough datasets.
  if (n > options.min_primitives_for_parallel_build) {
    // Top levels first; deep subtree ranges land in shallow_node_infos_.
    BuildShallowTree(&nodes_, 0, n, /* root depth */ 0, options.shallow_depth,
                     p, pred);  // [0, n)

    assert(shallow_node_infos_.size() > 0);

    // Build deeper tree in parallel
    std::vector<std::vector<BVHNode<T> > > local_nodes(
        shallow_node_infos_.size());
    std::vector<BVHBuildStatistics> local_stats(shallow_node_infos_.size());

    size_t num_threads = std::min(
        size_t(kNANORT_MAX_THREADS),
        std::max(size_t(1), size_t(std::thread::hardware_concurrency())));
    if (shallow_node_infos_.size() < num_threads) {
      num_threads = shallow_node_infos_.size();
    }

    std::vector<std::thread> workers;
    // Shared work-queue counter: threads claim subtree jobs via i++.
    std::atomic<uint32_t> i(0);

    for (size_t t = 0; t < num_threads; t++) {
      workers.emplace_back(std::thread([&]() {
        uint32_t idx = 0;
        while ((idx = (i++)) < shallow_node_infos_.size()) {
          // Create thread-local copy of Pred since some mutable variables are
          // modified during SAH computation.
          const Pred local_pred = pred;
          unsigned int left_idx = shallow_node_infos_[size_t(idx)].left_idx;
          unsigned int right_idx = shallow_node_infos_[size_t(idx)].right_idx;
          BuildTree(&(local_stats[size_t(idx)]), &(local_nodes[size_t(idx)]),
                    left_idx, right_idx, options.shallow_depth, p, local_pred);
        }
      }));
    }

    for (auto &t : workers) {
      t.join();
    }

    // Join local nodes
    for (size_t ii = 0; ii < local_nodes.size(); ii++) {
      assert(!local_nodes[ii].empty());
      size_t offset = nodes_.size();

      // Add offset to child index (for branch node).
      // (-1 accounts for the local root being spliced into the placeholder
      // slot instead of appended.)
      for (size_t j = 0; j < local_nodes[ii].size(); j++) {
        if (local_nodes[ii][j].flag == 0) {  // branch
          local_nodes[ii][j].data[0] += offset - 1;
          local_nodes[ii][j].data[1] += offset - 1;
        }
      }

      // replace
      nodes_[shallow_node_infos_[ii].offset] = local_nodes[ii][0];

      // Skip root element of the local node.
nodes_.insert(nodes_.end(), local_nodes[ii].begin() + 1,
                    local_nodes[ii].end());
    }

    // Join statistics
    for (size_t ii = 0; ii < local_nodes.size(); ii++) {
      stats_.max_tree_depth =
          std::max(stats_.max_tree_depth, local_stats[ii].max_tree_depth);
      stats_.num_leaf_nodes += local_stats[ii].num_leaf_nodes;
      stats_.num_branch_nodes += local_stats[ii].num_branch_nodes;
    }

  } else {
    // Single thread.
    BuildTree(&stats_, &nodes_, 0, n, /* root depth */ 0, p, pred);  // [0, n)
  }

#elif defined(_OPENMP)

  // Do parallel build for large enough datasets.
  // (Same shallow-tree-then-subtrees scheme as the C++11 path above, but
  // scheduled with an OpenMP parallel-for over the recorded subtrees.)
  if (n > options.min_primitives_for_parallel_build) {
    BuildShallowTree(&nodes_, 0, n, /* root depth */ 0, options.shallow_depth,
                     p, pred);  // [0, n)

    assert(shallow_node_infos_.size() > 0);

    // Build deeper tree in parallel
    std::vector<std::vector<BVHNode<T> > > local_nodes(
        shallow_node_infos_.size());
    std::vector<BVHBuildStatistics> local_stats(shallow_node_infos_.size());

#pragma omp parallel for
    for (int i = 0; i < static_cast<int>(shallow_node_infos_.size()); i++) {
      unsigned int left_idx = shallow_node_infos_[size_t(i)].left_idx;
      unsigned int right_idx = shallow_node_infos_[size_t(i)].right_idx;
      // Thread-local Pred copy (mutable split state).
      const Pred local_pred = pred;
      BuildTree(&(local_stats[size_t(i)]), &(local_nodes[size_t(i)]), left_idx,
                right_idx, options.shallow_depth, p, local_pred);
    }

    // Join local nodes
    for (size_t i = 0; i < local_nodes.size(); i++) {
      assert(!local_nodes[size_t(i)].empty());
      size_t offset = nodes_.size();

      // Add offset to child index (for branch node).
      for (size_t j = 0; j < local_nodes[i].size(); j++) {
        if (local_nodes[i][j].flag == 0) {  // branch
          local_nodes[i][j].data[0] += offset - 1;
          local_nodes[i][j].data[1] += offset - 1;
        }
      }

      // replace
      nodes_[shallow_node_infos_[i].offset] = local_nodes[i][0];

      // Skip root element of the local node.
      nodes_.insert(nodes_.end(), local_nodes[i].begin() + 1,
                    local_nodes[i].end());
    }

    // Join statistics
    for (size_t i = 0; i < local_nodes.size(); i++) {
      stats_.max_tree_depth =
          std::max(stats_.max_tree_depth, local_stats[i].max_tree_depth);
      stats_.num_leaf_nodes += local_stats[i].num_leaf_nodes;
      stats_.num_branch_nodes += local_stats[i].num_branch_nodes;
    }

  } else {
    // Single thread
    BuildTree(&stats_, &nodes_, 0, n, /* root depth */ 0, p, pred);  // [0, n)
  }

#else  // !NANORT_ENABLE_PARALLEL_BUILD
  {
    BuildTree(&stats_, &nodes_, 0, n, /* root depth */ 0, p, pred);  // [0, n)
  }
#endif
#else  // !_OPENMP

  // Single thread BVH build
  {
    BuildTree(&stats_, &nodes_, 0, n, /* root depth */ 0, p, pred);  // [0, n)
  }
#endif

  return true;
}

/// Dump the permuted index array and every node's bounds to stdout.
template <typename T>
void BVHAccel<T>::Debug() {
  for (size_t i = 0; i < indices_.size(); i++) {
    printf("index[%d] = %d\n", int(i), int(indices_[i]));
  }

  for (size_t i = 0; i < nodes_.size(); i++) {
    printf("node[%d] : bmin %f, %f, %f, bmax %f, %f, %f\n", int(i),
           nodes_[i].bmin[0], nodes_[i].bmin[1], nodes_[i].bmin[2],
           nodes_[i].bmax[0], nodes_[i].bmax[1], nodes_[i].bmax[2]);
  }
}

#if defined(NANORT_ENABLE_SERIALIZATION)
/// Serialize the built BVH (node count, nodes, index count, indices) to a
/// binary file. Format is raw structs: not portable across sizes/endianness.
template <typename T>
bool BVHAccel<T>::Dump(const char *filename) const {
  FILE *fp = fopen(filename, "wb");
  if (!fp) {
    // fprintf(stderr, "[BVHAccel] Cannot write a file: %s\n", filename);
    return false;
  }

  size_t numNodes = nodes_.size();
  assert(nodes_.size() > 0);

  size_t numIndices = indices_.size();

  size_t r = 0;
  r = fwrite(&numNodes, sizeof(size_t), 1, fp);
  assert(r == 1);

  r = fwrite(&nodes_.at(0), sizeof(BVHNode<T>), numNodes, fp);
  assert(r == numNodes);

  r = fwrite(&numIndices, sizeof(size_t), 1, fp);
  assert(r == 1);

  r = fwrite(&indices_.at(0), sizeof(unsigned int), numIndices, fp);
  assert(r == numIndices);

  fclose(fp);

  return true;
}

/// Same serialization, to an already-open stream (caller closes it).
template <typename T>
bool BVHAccel<T>::Dump(FILE *fp) const {
  size_t numNodes = nodes_.size();
  assert(nodes_.size() > 0);

  size_t numIndices = indices_.size();

  size_t r = 0;
  r = fwrite(&numNodes,
sizeof(size_t), 1, fp);
  assert(r == 1);

  r = fwrite(&nodes_.at(0), sizeof(BVHNode<T>), numNodes, fp);
  assert(r == numNodes);

  r = fwrite(&numIndices, sizeof(size_t), 1, fp);
  assert(r == 1);

  r = fwrite(&indices_.at(0), sizeof(unsigned int), numIndices, fp);
  assert(r == numIndices);

  return true;
}

/// Deserialize a BVH previously written by Dump(const char *).
/// NOTE(review): read sizes are only assert-checked, so a truncated or
/// corrupt file is not rejected in release builds.
template <typename T>
bool BVHAccel<T>::Load(const char *filename) {
  FILE *fp = fopen(filename, "rb");
  if (!fp) {
    // fprintf(stderr, "Cannot open file: %s\n", filename);
    return false;
  }

  size_t numNodes;
  size_t numIndices;

  size_t r = 0;
  r = fread(&numNodes, sizeof(size_t), 1, fp);
  assert(r == 1);
  assert(numNodes > 0);

  nodes_.resize(numNodes);
  r = fread(&nodes_.at(0), sizeof(BVHNode<T>), numNodes, fp);
  assert(r == numNodes);

  r = fread(&numIndices, sizeof(size_t), 1, fp);
  assert(r == 1);

  indices_.resize(numIndices);

  r = fread(&indices_.at(0), sizeof(unsigned int), numIndices, fp);
  assert(r == numIndices);

  fclose(fp);

  return true;
}

/// Same deserialization, from an already-open stream (caller closes it).
template <typename T>
bool BVHAccel<T>::Load(FILE *fp) {
  size_t numNodes;
  size_t numIndices;

  size_t r = 0;
  r = fread(&numNodes, sizeof(size_t), 1, fp);
  assert(r == 1);
  assert(numNodes > 0);

  nodes_.resize(numNodes);
  r = fread(&nodes_.at(0), sizeof(BVHNode<T>), numNodes, fp);
  assert(r == numNodes);

  r = fread(&numIndices, sizeof(size_t), 1, fp);
  assert(r == 1);

  indices_.resize(numIndices);

  r = fread(&indices_.at(0), sizeof(unsigned int), numIndices, fp);
  assert(r == numIndices);

  return true;
}
#endif

// Ray/AABB slab test. ray_dir_sign selects near/far planes per axis so the
// per-axis min/max need no branches on the direction inside the loop.
template <typename T>
inline bool IntersectRayAABB(T *tminOut,  // [out]
                             T *tmaxOut,  // [out]
                             T min_t, T max_t, const T bmin[3],
                             const T bmax[3], real3<T> ray_org,
                             real3<T> ray_inv_dir, int ray_dir_sign[3]);

template <>
inline bool IntersectRayAABB<float>(float *tminOut,  // [out]
                                    float *tmaxOut,  // [out]
                                    float min_t, float max_t,
                                    const float bmin[3], const float bmax[3],
                                    real3<float> ray_org,
                                    real3<float> ray_inv_dir,
                                    int ray_dir_sign[3]) {
  float tmin, tmax;

  const float min_x = ray_dir_sign[0] ? bmax[0] : bmin[0];
  const float min_y = ray_dir_sign[1] ? bmax[1] : bmin[1];
  const float min_z = ray_dir_sign[2] ? bmax[2] : bmin[2];
  const float max_x = ray_dir_sign[0] ? bmin[0] : bmax[0];
  const float max_y = ray_dir_sign[1] ? bmin[1] : bmax[1];
  const float max_z = ray_dir_sign[2] ? bmin[2] : bmax[2];

  // X
  const float tmin_x = (min_x - ray_org[0]) * ray_inv_dir[0];
  // MaxMult robust BVH traversal(up to 4 ulp).
  // 1.0000000000000004 for double precision.
  const float tmax_x = (max_x - ray_org[0]) * ray_inv_dir[0] * 1.00000024f;

  // Y
  const float tmin_y = (min_y - ray_org[1]) * ray_inv_dir[1];
  const float tmax_y = (max_y - ray_org[1]) * ray_inv_dir[1] * 1.00000024f;

  // Z
  const float tmin_z = (min_z - ray_org[2]) * ray_inv_dir[2];
  const float tmax_z = (max_z - ray_org[2]) * ray_inv_dir[2] * 1.00000024f;

  // safemin/safemax also discard NaNs from 0 * inf slab products.
  tmin = safemax(tmin_z, safemax(tmin_y, safemax(tmin_x, min_t)));
  tmax = safemin(tmax_z, safemin(tmax_y, safemin(tmax_x, max_t)));

  if (tmin <= tmax) {
    (*tminOut) = tmin;
    (*tmaxOut) = tmax;

    return true;
  }

  return false;  // no hit
}

template <>
inline bool IntersectRayAABB<double>(double *tminOut,  // [out]
                                     double *tmaxOut,  // [out]
                                     double min_t, double max_t,
                                     const double bmin[3],
                                     const double bmax[3],
                                     real3<double> ray_org,
                                     real3<double> ray_inv_dir,
                                     int ray_dir_sign[3]) {
  double tmin, tmax;

  const double min_x = ray_dir_sign[0] ? bmax[0] : bmin[0];
  const double min_y = ray_dir_sign[1] ? bmax[1] : bmin[1];
  const double min_z = ray_dir_sign[2] ? bmax[2] : bmin[2];
  const double max_x = ray_dir_sign[0] ? bmin[0] : bmax[0];
  const double max_y = ray_dir_sign[1] ? bmin[1] : bmax[1];
  const double max_z = ray_dir_sign[2] ? bmin[2] : bmax[2];

  // X
  const double tmin_x = (min_x - ray_org[0]) * ray_inv_dir[0];
  // MaxMult robust BVH traversal(up to 4 ulp).
  const double tmax_x =
      (max_x - ray_org[0]) * ray_inv_dir[0] * 1.0000000000000004;

  // Y
  const double tmin_y = (min_y - ray_org[1]) * ray_inv_dir[1];
  const double tmax_y =
      (max_y - ray_org[1]) * ray_inv_dir[1] * 1.0000000000000004;

  // Z
  const double tmin_z = (min_z - ray_org[2]) * ray_inv_dir[2];
  const double tmax_z =
      (max_z - ray_org[2]) * ray_inv_dir[2] * 1.0000000000000004;

  tmin = safemax(tmin_z, safemax(tmin_y, safemax(tmin_x, min_t)));
  tmax = safemin(tmax_z, safemin(tmax_y, safemin(tmax_x, max_t)));

  if (tmin <= tmax) {
    (*tminOut) = tmin;
    (*tmaxOut) = tmax;

    return true;
  }

  return false;  // no hit
}

// Test every primitive in a leaf against the ray, keeping the nearest hit
// via the intersector's Update(). Returns true if any primitive was hit.
template <typename T>
template <class I>
inline bool BVHAccel<T>::TestLeafNode(const BVHNode<T> &node, const Ray<T> &ray,
                                      const I &intersector) const {
  bool hit = false;

  unsigned int num_primitives = node.data[0];
  unsigned int offset = node.data[1];

  T t = intersector.GetT();  // current hit distance

  real3<T> ray_org;
  ray_org[0] = ray.org[0];
  ray_org[1] = ray.org[1];
  ray_org[2] = ray.org[2];

  real3<T> ray_dir;
  ray_dir[0] = ray.dir[0];
  ray_dir[1] = ray.dir[1];
  ray_dir[2] = ray.dir[2];

  for (unsigned int i = 0; i < num_primitives; i++) {
    unsigned int prim_idx = indices_[i + offset];

    T local_t = t;
    if (intersector.Intersect(&local_t, prim_idx)) {
      // Update isect state
      t = local_t;

      intersector.Update(t, prim_idx);
      hit = true;
    }
  }

  return hit;
}

#if 0
// TODO(LTE): Implement
template <typename T>
template<class I, class H, class Comp>
bool BVHAccel<T>::MultiHitTestLeafNode(
    std::priority_queue<H, std::vector<H>, Comp> *isect_pq,
    int max_intersections,
    const BVHNode<T> &node, const Ray<T> &ray,
    const I &intersector) const {

  bool hit = false;

  unsigned int num_primitives = node.data[0];
  unsigned int offset = node.data[1];

  T t = std::numeric_limits<T>::max();
  if (isect_pq->size() >= static_cast<size_t>(max_intersections)) {
    t = isect_pq->top().t;  // current furthest hit distance
  }

  real3<T> ray_org;
  ray_org[0] = ray.org[0];
  ray_org[1] = ray.org[1];
  ray_org[2] = ray.org[2];

  real3<T>
ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; for (unsigned int i = 0; i < num_primitives; i++) { unsigned int prim_idx = indices_[i + offset]; T local_t = t, u = 0.0f, v = 0.0f; if (intersector.Intersect(&local_t, &u, &v, prim_idx)) { // Update isect state if ((local_t > ray.min_t)) { if (isect_pq->size() < static_cast<size_t>(max_intersections)) { H isect; t = local_t; isect.t = t; isect.u = u; isect.v = v; isect.prim_id = prim_idx; isect_pq->push(isect); // Update t to furthest distance. t = ray.max_t; hit = true; } else if (local_t < isect_pq->top().t) { // delete furthest intersection and add new intersection. isect_pq->pop(); H hit; hit.t = local_t; hit.u = u; hit.v = v; hit.prim_id = prim_idx; isect_pq->push(hit); // Update furthest hit distance t = isect_pq->top().t; hit = true; } } } } return hit; } #endif template <typename T> template <class I, class H> bool BVHAccel<T>::Traverse(const Ray<T> &ray, const I &intersector, H *isect, const BVHTraceOptions &options) const { const int kMaxStackDepth = 512; (void)kMaxStackDepth; T hit_t = ray.max_t; int node_stack_index = 0; unsigned int node_stack[512]; node_stack[0] = 0; // Init isect info as no hit intersector.Update(hit_t, static_cast<unsigned int>(-1)); intersector.PrepareTraversal(ray, options); int dir_sign[3]; dir_sign[0] = ray.dir[0] < static_cast<T>(0.0) ? 1 : 0; dir_sign[1] = ray.dir[1] < static_cast<T>(0.0) ? 1 : 0; dir_sign[2] = ray.dir[2] < static_cast<T>(0.0) ? 
1 : 0; real3<T> ray_inv_dir; real3<T> ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; ray_inv_dir = vsafe_inverse(ray_dir); real3<T> ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; T min_t = std::numeric_limits<T>::max(); T max_t = -std::numeric_limits<T>::max(); while (node_stack_index >= 0) { unsigned int index = node_stack[node_stack_index]; const BVHNode<T> &node = nodes_[index]; node_stack_index--; bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin, node.bmax, ray_org, ray_inv_dir, dir_sign); if (hit) { // Branch node if (node.flag == 0) { int order_near = dir_sign[node.axis]; int order_far = 1 - order_near; // Traverse near first. node_stack[++node_stack_index] = node.data[order_far]; node_stack[++node_stack_index] = node.data[order_near]; } else if (TestLeafNode(node, ray, intersector)) { // Leaf node hit_t = intersector.GetT(); } } } assert(node_stack_index < kNANORT_MAX_STACK_DEPTH); bool hit = (intersector.GetT() < ray.max_t); intersector.PostTraversal(ray, hit, isect); return hit; } template <typename T> template <class I> inline bool BVHAccel<T>::TestLeafNodeIntersections( const BVHNode<T> &node, const Ray<T> &ray, const int max_intersections, const I &intersector, std::priority_queue<NodeHit<T>, std::vector<NodeHit<T> >, NodeHitComparator<T> > *isect_pq) const { bool hit = false; unsigned int num_primitives = node.data[0]; unsigned int offset = node.data[1]; real3<T> ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; real3<T> ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; intersector.PrepareTraversal(ray); for (unsigned int i = 0; i < num_primitives; i++) { unsigned int prim_idx = indices_[i + offset]; T min_t, max_t; if (intersector.Intersect(&min_t, &max_t, prim_idx)) { // Always add to isect lists. 
NodeHit<T> isect; isect.t_min = min_t; isect.t_max = max_t; isect.node_id = prim_idx; if (isect_pq->size() < static_cast<size_t>(max_intersections)) { isect_pq->push(isect); } else if (min_t < isect_pq->top().t_min) { // delete the furthest intersection and add a new intersection. isect_pq->pop(); isect_pq->push(isect); } } } return hit; } template <typename T> template <class I> bool BVHAccel<T>::ListNodeIntersections( const Ray<T> &ray, int max_intersections, const I &intersector, StackVector<NodeHit<T>, 128> *hits) const { const int kMaxStackDepth = 512; T hit_t = ray.max_t; int node_stack_index = 0; unsigned int node_stack[512]; node_stack[0] = 0; // Stores furthest intersection at top std::priority_queue<NodeHit<T>, std::vector<NodeHit<T> >, NodeHitComparator<T> > isect_pq; (*hits)->clear(); int dir_sign[3]; dir_sign[0] = ray.dir[0] < static_cast<T>(0.0) ? 1 : 0; dir_sign[1] = ray.dir[1] < static_cast<T>(0.0) ? 1 : 0; dir_sign[2] = ray.dir[2] < static_cast<T>(0.0) ? 1 : 0; real3<T> ray_inv_dir; real3<T> ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; ray_inv_dir = vsafe_inverse(ray_dir); real3<T> ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; T min_t, max_t; while (node_stack_index >= 0) { unsigned int index = node_stack[node_stack_index]; const BVHNode<T> &node = nodes_[static_cast<size_t>(index)]; node_stack_index--; bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin, node.bmax, ray_org, ray_inv_dir, dir_sign); if (hit) { // Branch node if (node.flag == 0) { int order_near = dir_sign[node.axis]; int order_far = 1 - order_near; // Traverse near first. 
node_stack[++node_stack_index] = node.data[order_far]; node_stack[++node_stack_index] = node.data[order_near]; } else { // Leaf node TestLeafNodeIntersections(node, ray, max_intersections, intersector, &isect_pq); } } } assert(node_stack_index < kMaxStackDepth); (void)kMaxStackDepth; if (!isect_pq.empty()) { // Store intesection in reverse order (make it frontmost order) size_t n = isect_pq.size(); (*hits)->resize(n); for (size_t i = 0; i < n; i++) { const NodeHit<T> &isect = isect_pq.top(); (*hits)[n - i - 1] = isect; isect_pq.pop(); } return true; } return false; } #if 0 // TODO(LTE): Implement template <typename T> template<class I, class H, class Comp> bool BVHAccel<T>::MultiHitTraverse(const Ray<T> &ray, int max_intersections, const I &intersector, StackVector<H, 128> *hits, const BVHTraceOptions& options) const { const int kMaxStackDepth = 512; T hit_t = ray.max_t; int node_stack_index = 0; unsigned int node_stack[512]; node_stack[0] = 0; // Stores furthest intersection at top std::priority_queue<H, std::vector<H>, Comp> isect_pq; (*hits)->clear(); // Init isect info as no hit intersector.Update(hit_t, static_cast<unsigned int>(-1)); intersector.PrepareTraversal(ray, options); int dir_sign[3]; dir_sign[0] = ray.dir[0] < static_cast<T>(0.0) ? static_cast<T>(1) : static_cast<T>(0); dir_sign[1] = ray.dir[1] < static_cast<T>(0.0) ? static_cast<T>(1) : static_cast<T>(0); dir_sign[2] = ray.dir[2] < static_cast<T>(0.0) ? 
static_cast<T>(1) : static_cast<T>(0); real3<T> ray_inv_dir; real3<T> ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; ray_inv_dir = vsafe_inverse(ray_dir); real3<T> ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; T min_t, max_t; while (node_stack_index >= 0) { unsigned int index = node_stack[node_stack_index]; const BVHNode<T> &node = nodes_[static_cast<size_t>(index)]; node_stack_index--; bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin, node.bmax, ray_org, ray_inv_dir, dir_sign); // branch node if(hit) { if (node.flag == 0) { int order_near = dir_sign[node.axis]; int order_far = 1 - order_near; // Traverse near first. node_stack[++node_stack_index] = node.data[order_far]; node_stack[++node_stack_index] = node.data[order_near]; } else { if (MultiHitTestLeafNode(&isect_pq, max_intersections, node, ray, intersector)) { // Only update `hit_t` when queue is full. if (isect_pq.size() >= static_cast<size_t>(max_intersections)) { hit_t = isect_pq.top().t; } } } } } assert(node_stack_index < kMaxStackDepth); (void)kMaxStackDepth; if (!isect_pq.empty()) { // Store intesection in reverse order (make it frontmost order) size_t n = isect_pq.size(); (*hits)->resize(n); for (size_t i = 0; i < n; i++) { const H &isect = isect_pq.top(); (*hits)[n - i - 1] = isect; isect_pq.pop(); } return true; } return false; } #endif #ifdef __clang__ #pragma clang diagnostic pop #endif } // namespace nanort #endif // NANORT_H_
kmeans.h
#ifndef SRC_CLUSTERING_KMEANS_H_ #define SRC_CLUSTERING_KMEANS_H_ #include "clustering/kmeans_trait.h" #include "util/alignment.h" #include <limits> #include <random> #include <vector> namespace GraphSfM { /** * @brief Kind of initialization of the kmeans centers */ enum class KMeansInitType { KMEANS_INIT_RANDOM, // Standard Llyod algoritm KMEANS_INIT_PP, // K-means++ initialization }; /** * @brief Compute minimum distance to any center * @param pts Input points * @param centers Centers * @param[out] dists computed (minimum) distance to any center */ template<typename DataType> void MinimumDistanceToAnyCenter( const std::vector<DataType>& pts, const std::vector<DataType>& centers, std::vector<typename KMeansVectorDataTrait<DataType>::scalar_type>& dists ) { using trait = KMeansVectorDataTrait<DataType>; dists.resize(pts.size(), std::numeric_limits<typename trait::scalar_type>::max()); #pragma omp parallel for for (int id_pt = 0; id_pt < static_cast<int>(pts.size()); ++id_pt) { const auto & pt = pts[id_pt]; for (const auto& c : centers) { const typename trait::scalar_type cur_d = trait::L2(pt, c); dists[id_pt] = std::min(dists[id_pt], cur_d); } } } /** * @brief Compute Nearest center Id of a given point * @param pt Query point * @param centers list of test centers * @return id of the nearest center (0-based) */ template<typename DataType> uint32_t NearestCenterID(const DataType& pt, const std::vector<DataType>& centers) { using trait = KMeansVectorDataTrait<DataType>; const uint32_t nb_cluster = static_cast<uint32_t>(centers.size()); typename trait::scalar_type min_dist = std::numeric_limits<typename trait::scalar_type>::max(); uint32_t nearest_center = nb_cluster; for (uint32_t cur_center = 0; cur_center < nb_cluster; ++cur_center) { const typename trait::scalar_type cur_dist = trait::L2(pt, centers[cur_center]); if (cur_dist < min_dist) { min_dist = cur_dist; nearest_center = cur_center; } } return nearest_center; } /** * @brief Compute center of mass of a set a 
points * @param pts List of points * @param assigned_center Id of the center to be affected to a given point * @param nb_center Number of center of mass in the result * @return New centers of mass */ template<typename DataType> std::vector<DataType> ComputeCenterOfMass(const std::vector<DataType>& pts, const std::vector<uint32_t>& assigned_center, const uint32_t nb_center) { using trait = KMeansVectorDataTrait<DataType>; std::vector<DataType> new_centers(nb_center, trait::null(pts[0])); std::vector<uint32_t> nb_per_center(nb_center, 0); // Affect points to centers for ( size_t id_pt = 0; id_pt < pts.size(); ++id_pt) { const uint32_t id_center = assigned_center[id_pt]; trait::accumulate(new_centers[id_center], pts[id_pt]); ++nb_per_center[id_center]; } // Compute mean of centers based on the number of points affected to each centers #pragma omp parallel for for (int id_center = 0; id_center < static_cast<int>(nb_center); ++id_center) { trait::divide(new_centers[id_center], nb_per_center[id_center]); } return new_centers; } /** * @brief Compute simple kmeans clustering on specified data * @param source_data Input data * @param[out] cluster_assignment index for each point in the input set to a specified cluster * @param[out] centers Centers of the clusters * @param nb_cluster requested number of cluster in the output * @param max_nb_iteration maximum number of iteration to do for clustering * @note This is the standard llyod algorithm */ template <typename DataType> void KMeans(const std::vector<DataType>& source_data, std::vector<uint32_t>& cluster_assignment, std::vector<DataType>& centers, const uint32_t nb_cluster, const uint32_t max_nb_iteration = std::numeric_limits<uint32_t>::max(), const KMeansInitType init_type = KMeansInitType::KMEANS_INIT_PP) { if (source_data.size() == 0) { return; } using trait = KMeansVectorDataTrait<DataType>; std::mt19937_64 rng(std::mt19937_64::default_seed); // 1 - init center of mass if (init_type == KMeansInitType::KMEANS_INIT_PP) 
{ // Kmeans++ init: // first one is a random one // the others based on the importance probability (Di / \sum_i Di) where: // Di is the minimum distance to any created centers already created std::uniform_int_distribution<size_t> distrib_first(0, source_data.size() - 1); centers.reserve(nb_cluster); centers.emplace_back(source_data[ distrib_first( rng )]); std::vector<typename trait::scalar_type> dists; for (uint32_t id_center = 1; id_center < nb_cluster; ++id_center) { // Compute Di / \sum Di pdf MinimumDistanceToAnyCenter(source_data, centers, dists); std::discrete_distribution<size_t> distrib_c(dists.cbegin(), dists.cend()); // Sample a point from this distribution centers.emplace_back(source_data[distrib_c(rng)]); } } else if (init_type == KMeansInitType::KMEANS_INIT_RANDOM) { DataType min, max; trait::minMax(source_data, min, max); // Standard Llyod init centers.resize(nb_cluster); std::uniform_int_distribution<size_t> distrib(0, source_data.size() - 1); for (auto & cur_center : centers) { cur_center = source_data[distrib(rng)]; } } else { // Invalid Kmeans initialization type return; } // Assign all element to the first center cluster_assignment.resize(source_data.size(), nb_cluster); bool changed; uint32_t id_iteration = 0; // 2 - Perform kmeans do { changed = false; // 2.1 affect center to each points #pragma omp parallel for shared(changed) for (int id_pt = 0; id_pt < static_cast<int>(source_data.size()); ++id_pt) { const DataType & cur_pt = source_data[id_pt]; // Compute nearest center of this point const uint32_t nearest_center = NearestCenterID(cur_pt, centers); if (cluster_assignment[id_pt] != nearest_center) { cluster_assignment[id_pt] = nearest_center; changed = true; } } // 2.2 Compute new centers of mass centers = ComputeCenterOfMass(source_data, cluster_assignment, nb_cluster); ++id_iteration; } while(changed && id_iteration < max_nb_iteration); } } // namespace GraphSfM #endif
softmax_hcl_arm.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: haoluo@openailab.com
 */

#include "softmax_param.h"

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "module/module.h"
#include "operator/op.h"
#include "utility/sys_port.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"

#include <math.h>
#include <stdint.h>
#include <stdlib.h> /* BUGFIX: malloc/free were used without this include */
#include <string.h>

#include <arm_neon.h>

/* Propagate the input shape to the output tensor when they differ.
 * NOTE(review): only the first four dims are compared, matching the 4D
 * assumption made by run() below. */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;
    struct tensor* output_tensor;
    int ret = 0;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    if (input_tensor->dims[0] != output_tensor->dims[0] || input_tensor->dims[1] != output_tensor->dims[1]
        || input_tensor->dims[2] != output_tensor->dims[2] || input_tensor->dims[3] != output_tensor->dims[3])
        ret = set_ir_tensor_shape(output_tensor, input_tensor->dims, input_tensor->dim_num);

    return ret;
}

/* Vectorized exp() approximation: exp(x) ~ (1 + x/1024)^1024, evaluated with
 * 10 successive squarings (2^10 == 1024, hence the name). */
static inline float32x4_t vexpq10_f32(float32x4_t x)
{
    x = vmlaq_n_f32(vdupq_n_f32(1.0f), x, 0.0009765625f); /* 1 + x/1024 */
    /* n = 10 squarings */
    x = vmulq_f32(x, x);
    x = vmulq_f32(x, x);
    x = vmulq_f32(x, x);
    x = vmulq_f32(x, x);
    x = vmulq_f32(x, x);
    x = vmulq_f32(x, x);
    x = vmulq_f32(x, x);
    x = vmulq_f32(x, x);
    x = vmulq_f32(x, x);
    x = vmulq_f32(x, x);
    return x;
}

/* For each of the in_size inner positions compute the maximum over the
 * on_size (softmax-axis) slices: array[i] = max_j input[j * in_size + i].
 * The result is clamped below at 0 (array is zero-initialized), which keeps
 * exp(x - max) from overflowing and does not change the softmax value. */
static void GetMaxArray(float* input, float* array, int in_size, int on_size, int num_thread)
{
    float* input_ptr = (float*)input;
    float* array_ptr = (float*)array;
    memset(array, 0, in_size * sizeof(float));

    for (int j = 0; j < on_size; j++)
    {
        for (int i = 0; i < (in_size & -4); i += 4)
        {
            float32x4_t _p = vld1q_f32(array_ptr + i);
            float32x4_t _in = vld1q_f32(input_ptr + j * in_size + i);
            /* BUGFIX: this must be a lane-wise maximum.  The previous code
             * used vpmaxq_f32 on aarch64 (a *pairwise* max that mixes lanes)
             * and a vrev64q/vextq combination on armv7, both of which
             * scramble the per-position maxima.  The scalar tail below shows
             * the intended element-wise semantics. */
            _p = vmaxq_f32(_p, _in);
            vst1q_f32(array_ptr + i, _p);
        }
        /* scalar tail for in_size not a multiple of 4 */
        for (int i = in_size & ~3; i < in_size; i++)
        {
            if (array_ptr[i] < input_ptr[j * in_size + i])
                array_ptr[i] = input_ptr[j * in_size + i];
        }
    }
}

/* output[j*in_size+i] = exp(input[j*in_size+i] - max[i]) / sum_j exp(...). */
static void GetOutResult(float* input, float* output, float* maxarray, float* sum_array, int in_size, int on_size,
                         int num_thread)
{
    float* input_ptr = (float*)input;
    float* output_ptr = (float*)output;
    float* maxarray_ptr = (float*)maxarray;
    float* sum_array_ptr = (float*)sum_array;

    memset(sum_array, 0x0, in_size * sizeof(float));

    /* get the exp (with the running max subtracted for stability) and the
     * per-position summary */
    for (int j = 0; j < on_size; j++)
    {
        for (int i = 0; i < (in_size & -4); i += 4)
        {
            int index = j * in_size + i;
            float32x4_t out = vexpq10_f32(vsubq_f32(vld1q_f32(input_ptr + index), vld1q_f32(maxarray_ptr + i)));
            float32x4_t sum = vaddq_f32(vld1q_f32(sum_array_ptr + i), out);
            vst1q_f32(output_ptr + index, out);
            vst1q_f32(sum_array_ptr + i, sum);
        }
        /* scalar tail (uses libm exp; slightly more accurate than the
         * vectorized approximation above, as in the original code) */
        for (int i = in_size & ~3; i < in_size; i++)
        {
            int index = j * in_size + i;
            output_ptr[index] = exp(input_ptr[index] - maxarray_ptr[i]);
            sum_array_ptr[i] += output_ptr[index];
        }
    }

    /* the final result: normalize by the per-position sums */
    for (int j = 0; j < on_size; j++)
        for (int l = 0; l < in_size; l++)
        {
            int index = j * in_size + l;
            output_ptr[index] /= sum_array_ptr[l];
        }
}

static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Softmax over softmax_param->axis of a (up to) 4D FP32 tensor.
 * Layout: out_size outer blocks, each of on_size slices of in_size floats. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;
    struct tensor* output_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    struct softmax_param* softmax_param = (struct softmax_param*)ir_node->op.param_mem;

    int element_size = input_tensor->elem_size;

    /* BUGFIX: guard the fixed-size dims[4] copy; the previous code wrote out
     * of bounds for tensors with more than four dimensions. */
    if (input_tensor->dim_num > 4)
        return -1;

    int dims[4];
    for (int i = 0; i < input_tensor->dim_num; i++)
    {
        dims[i] = input_tensor->dims[i];
    }

    int axis = softmax_param->axis;
    int out_size, in_size, on_size;
    out_size = 1;
    for (int i = 0; i < axis; i++)
    {
        out_size *= dims[i];
    }
    in_size = 1;
    for (int i = axis + 1; i < (int)input_tensor->dim_num; i++)
    {
        in_size *= dims[i];
    }
    on_size = dims[axis];

    uint8_t* input = input_tensor->data;
    uint8_t* output = output_tensor->data;

    float* max_array = (float*)malloc(in_size * sizeof(float));
    float* sum_array = (float*)malloc(in_size * sizeof(float));
    /* BUGFIX: allocation results were previously used unchecked */
    if (max_array == NULL || sum_array == NULL)
    {
        free(max_array);
        free(sum_array);
        return -1;
    }

    int on_in_size = on_size * in_size;

    /* TODO: int8/uint8 path is not implemented (score() only accepts FP32).
     * The original placeholder that allocated and immediately freed two
     * scratch buffers for element_size == 1 was removed; it had no effect. */

    for (int i = 0; i < out_size; i++)
    {
        /* get max */
        int img_base = i * on_in_size * element_size;

        GetMaxArray((float*)(input + img_base), max_array, in_size, on_size, exec_graph->num_thread);
        GetOutResult((float*)(input + img_base), (float*)(output + img_base), max_array, sum_array, in_size, on_size,
                     exec_graph->num_thread);
    }

    free(max_array);
    free(sum_array);

    return 0;
}

static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    struct node* ir_node = exec_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);

    /* todo support uint8 */
    if (input_tensor->data_type != TENGINE_DT_FP32)
        return 0;

    return OPS_SCORE_BEST;
}

static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = reshape,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

int register_softmax_hcl_arm_op()
{
    return register_builtin_node_ops(OP_SOFTMAX, &hcl_node_ops);
}

int unregister_softmax_hcl_arm_op()
{
    return unregister_builtin_node_ops(OP_SOFTMAX, &hcl_node_ops);
}
hello_world_omp.c
#include <omp.h> #include <stdio.h> int main(int *argc, char *argv[]) { #pragma omp parallel { printf("Hello world!\n"); } return 0; }
bubble_helper_progs.c
#include "Parameter_files/INIT_PARAMS.H"
#include "Parameter_files/ANAL_PARAMS.H"

/* Apply a smoothing filter of scale R to the k-space HII box.
   filter_type: 0 = real-space top-hat, 1 = k-space top-hat, 2 = gaussian. */
void HII_filter(fftwf_complex *box, int filter_type, float R)
{
#pragma omp parallel shared(box, filter_type, R)
    {
        // loop through the k-box; variables declared inside the region are
        // automatically thread-private
#pragma omp for
        for (int n_x = 0; n_x < HII_DIM; n_x++){
            float k_mag_x = (n_x > HII_MIDDLE) ? (n_x - HII_DIM) * (n_x - HII_DIM) : n_x * n_x;

            for (int n_y = 0; n_y < HII_DIM; n_y++){
                float k_mag_y = (n_y > HII_MIDDLE) ? (n_y - HII_DIM) * (n_y - HII_DIM) : n_y * n_y;

                for (int n_z = 0; n_z <= HII_MIDDLE; n_z++){
                    float k_mag = sqrt(k_mag_x + k_mag_y + n_z * n_z) * DELTA_K;
                    float kR = k_mag * R;

                    if (filter_type == 0){ // real space top-hat
                        if (kR > 1e-4){
                            box[HII_C_INDEX(n_x, n_y, n_z)] *= 3.0 * (sin(kR)/(kR*kR*kR) - cos(kR)/(kR*kR));
                        }
                    }
                    else if (filter_type == 1){ // k-space top hat
                        // equates integrated volume to the real space top-hat (9pi/2)^(-1/3)
                        kR *= 0.413566994;
                        if (kR > 1){
                            box[HII_C_INDEX(n_x, n_y, n_z)] = 0;
                        }
                    }
                    else if (filter_type == 2){ // gaussian
                        // equates integrated volume to the real space top-hat
                        kR *= 0.643;
                        box[HII_C_INDEX(n_x, n_y, n_z)] *= pow(E, -kR*kR/2.0);
                    }
                    else{
                        // warn exactly once, from the first cell
                        if ( (n_x==0) && (n_y==0) && (n_z==0) )
                            fprintf(stderr, "HII_filter.c: Warning, filter type %i is undefined\nBox is unfiltered\n", filter_type);
                    }
                }
            }
        } // end looping through k box
    }

    return;
}

/*
  all lengths are in units of the box size.
  (x,y,z) is the closest periodic reflection of (x2,y2,z2) to (x1,y1,z1).

  Offsets 0, -1, +1 are tried on each axis (x outermost, z innermost) with a
  strict ">" test, which reproduces the ordering and tie-breaking of the
  fully unrolled 27-case original.
*/
float distance_coord(float x1, float y1, float z1,
                     float x2, float y2, float z2,
                     float *x, float *y, float *z )
{
    static const float off[3] = {0.0f, -1.0f, 1.0f};
    float minimumsq = 10;

    for (int ix = 0; ix < 3; ix++){
        float dxsq = pow(x1 - x2 - off[ix], 2);
        for (int iy = 0; iy < 3; iy++){
            float dysq = pow(y1 - y2 - off[iy], 2);
            for (int iz = 0; iz < 3; iz++){
                float dzsq = pow(z1 - z2 - off[iz], 2);
                float dsq = dxsq + dysq + dzsq;
                if (minimumsq > dsq){
                    minimumsq = dsq;
                    *x = x2 + off[ix];
                    *y = y2 + off[iy];
                    *z = z2 + off[iz];
                }
            }
        }
    }

    return sqrt(minimumsq);
}

/* all lengths are in units of the box size; minimum periodic distance
   between (x1,y1,z1) and (x2,y2,z2), checking all 27 reflections */
float distance(float x1, float y1, float z1, float x2, float y2, float z2)
{
    static const float off[3] = {0.0f, -1.0f, 1.0f};
    float minimumsq = 10;

    for (int ix = 0; ix < 3; ix++){
        float dxsq = pow(x1 - x2 - off[ix], 2);
        for (int iy = 0; iy < 3; iy++){
            float dysq = pow(y1 - y2 - off[iy], 2);
            for (int iz = 0; iz < 3; iz++){
                float dzsq = pow(z1 - z2 - off[iz], 2);
                float dsq = dxsq + dysq + dzsq;
                if (minimumsq > dsq)
                    minimumsq = dsq;
            }
        }
    }

    return sqrt(minimumsq);
}

/* helper function for update_in_sphere below: zero every still-flagged cell
   of the region [x_min..x_max] x [y_min..y_max] x [z_min..z_max] whose
   periodic squared distance to (x,y,z) is below Rsq_curr_index */
void check_region(float * box, int dimensions, float Rsq_curr_index,
                  int x, int y, int z,
                  int x_min, int x_max, int y_min, int y_max, int z_min, int z_max)
{
    for (int x_curr = x_min; x_curr <= x_max; x_curr++){
        for (int y_curr = y_min; y_curr <= y_max; y_curr++){
            for (int z_curr = z_min; z_curr <= z_max; z_curr++){
                int x_index = x_curr;
                int y_index = y_curr;
                int z_index = z_curr;

                // wrap periodically if we stepped outside of the box
                if (x_index < 0)                 {x_index += dimensions;}
                else if (x_index >= dimensions)  {x_index -= dimensions;}
                if (y_index < 0)                 {y_index += dimensions;}
                else if (y_index >= dimensions)  {y_index -= dimensions;}
                if (z_index < 0)                 {z_index += dimensions;}
                else if (z_index >= dimensions)  {z_index -= dimensions;}

                // untaken pixel (not already part of another halo)?
                if (box[HII_R_INDEX(x_index, y_index, z_index)]){
                    // check all 27 periodic reflections; stop at the first
                    // one that falls inside the sphere
                    int inside = 0;
                    for (int ox = -1; ox <= 1 && !inside; ox++){
                        float xsq = pow(x - x_index + ox * dimensions, 2);
                        for (int oy = -1; oy <= 1 && !inside; oy++){
                            float ysq = pow(y - y_index + oy * dimensions, 2);
                            for (int oz = -1; oz <= 1 && !inside; oz++){
                                float zsq = pow(z - z_index + oz * dimensions, 2);
                                if (Rsq_curr_index > (xsq + ysq + zsq))
                                    inside = 1;
                            }
                        }
                    }

                    if (inside){
                        // within the sphere defined by R: clear the flag
                        box[HII_R_INDEX(x_index, y_index, z_index)] = 0;
                    }
                }
            }
        }
    }
}

/*
  Function UPDATE_IN_SPHERE takes in a <box> and flags all points
  which fall within radius R of (x,y,z).
  all lengths are in units of box size.
*/
// Paints (zeroes) every pixel of <box> within radius R of the fractional
// position (xf, yf, zf); all of R, xf, yf, zf are in units of the box size.
// Strategy: bulk-fill the largest cube guaranteed to be inside the sphere,
// then test only the shell between that cube and the sphere's bounding cube
// via check_region(). Periodic boundary conditions are handled there.
void update_in_sphere(float * box, int dimensions, float R, float xf, float yf, float zf){
  int x_curr, y_curr, z_curr, xb_min, xb_max, yb_min, yb_max, zb_min, zb_max, R_index;
  int xl_min, xl_max, yl_min, yl_max, zl_min, zl_max;
  float Rsq_curr_index;
  int x_index, y_index, z_index, x, y, z;

  // nothing to paint for a degenerate radius
  if (R<0) return;

  // convert distances to index units
  x = (int) (xf * dimensions + 0.5); // +0.5 for rounding
  y = (int) (yf * dimensions + 0.5);
  z = (int) (zf * dimensions + 0.5);

  /***** first, just automatically fill in the inner cube whose diagonal is R,
         side is 2R/sqrt(3) *****/
  // half-side (in cells) of the inscribed cube; -1 keeps it safely inside
  R_index = ceil(R/sqrt(3.0)*dimensions)-1;
  // set parameter range
  xl_min = x-R_index;
  xl_max = x+R_index;
  yl_min = y-R_index;
  yl_max = y+R_index;
  zl_min = z-R_index;
  zl_max = z+R_index;
  for (x_curr=xl_min; x_curr<=xl_max; x_curr++){
    for (y_curr=yl_min; y_curr<=yl_max; y_curr++){
      for (z_curr=zl_min; z_curr<=zl_max; z_curr++){
        x_index = x_curr;
        y_index = y_curr;
        z_index = z_curr;
        // adjust if we are outside of the box (periodic wrap, one box length)
        if (x_index<0) {x_index += dimensions;}
        else if (x_index>=dimensions) {x_index -= dimensions;}
        if (y_index<0) {y_index += dimensions;}
        else if (y_index>=dimensions) {y_index -= dimensions;}
        if (z_index<0) {z_index += dimensions;}
        else if (z_index>=dimensions) {z_index -= dimensions;}
        // now just paint it -- every cell of the inscribed cube is inside R
        //box[HII_R_INDEX(x_index, y_index, z_index)] = 15;
        box[HII_R_INDEX(x_index, y_index, z_index)] = 0;
      }
    }
  }

  /****** now check the pixels between the smaller and the larger cube which
          encloses the sphere ******/
  R_index = ceil(R*dimensions);              // half-side of the bounding cube
  Rsq_curr_index = pow(R*dimensions, 2);     // convert to index
  // set parameter range
  xb_min = x-R_index;
  xb_max = x+R_index;
  yb_min = y-R_index;
  yb_max = y+R_index;
  zb_min = z-R_index;
  zb_max = z+R_index;

  // Six slabs covering bounding cube minus inner cube (slabs overlap at the
  // edges; check_region only ever clears flags, so re-testing is harmless).
  // check_region(box, dimensions, Rsq_curr_index, x,y,z, xb_min, xb_max, yb_min, yb_max, zb_min, zb_max);
  check_region(box, dimensions, Rsq_curr_index,
               x,y,z, xb_min, xl_min, yb_min, yb_max, zb_min, zb_max);
  check_region(box, dimensions, Rsq_curr_index,
               x,y,z, xl_max, xb_max, yb_min, yb_max, zb_min, zb_max);
  check_region(box, dimensions, Rsq_curr_index,
               x,y,z, xb_min, xb_max, yb_min, yl_min, zb_min, zb_max);
  check_region(box, dimensions, Rsq_curr_index,
               x,y,z, xb_min, xb_max, yl_max, yb_max, zb_min, zb_max);
  check_region(box, dimensions, Rsq_curr_index,
               x,y,z, xb_min, xb_max, yb_min, yb_max, zb_min, zl_min);
  check_region(box, dimensions, Rsq_curr_index,
               x,y,z, xb_min, xb_max, yb_min, yb_max, zl_max, zb_max);
}
cffi.c
#include <stdbool.h>
#include <stdio.h>
#include <math.h>
#include <inttypes.h>

// Check little or big endian.
// Returns false (0) for big endian, true (1) for little endian.
// Linux (32- or 64-bit) on x86/ARM is probably little endian.
bool endian(void) {
    // https://stackoverflow.com/questions/12791864/c-program-to-check-little-vs-big-endian
    volatile uint32_t probe = 0x01234567;
    return (*((uint8_t*)(&probe))) == 0x67;
}

/*
 * DEFINE_CFFI_BITOPS(SUF, TYPE, UTYPE, BITS) stamps out the whole family of
 * element-wise bit kernels for one storage type; the four instantiations at
 * the bottom of the file replace ~50 hand-duplicated functions while keeping
 * every exported name and signature identical (the cffi ABI is unchanged).
 *
 * Conventions shared by all kernels:
 *   - bit_idx 0 is the least-significant bit of the VALUE (on little-endian
 *     Linux this is also the first bit of the first byte in memory);
 *   - per-bit arrays (mask / set1 / set0 / rand_src / per-bit probabilities /
 *     bits output) hold BITS consecutive entries per element;
 *   - protected_bits is indexed by bit position; len_protected_bits is kept
 *     for ABI compatibility but unused (as in the original code).
 *
 * All bit twiddling is done in the exact-width unsigned type UTYPE. This
 * fixes undefined/incorrect behaviour of the previous expansion:
 *   - "1 << 31" overflowed a signed int (UB) in the int32 variants;
 *   - "(input & (1 << 31)) >> 31" arithmetic-shifted to -1 for negative
 *     inputs, so the int32 *randomflip kernels consulted zero_bit_flip_prob
 *     for a SET sign bit; unsigned extraction yields 0/1 for every bit;
 *   - mask accumulation into short/char relied on implementation-defined
 *     narrowing of out-of-range ints.
 * (Only block comments may appear inside the macro: line splicing happens
 * before comment removal, so a // comment would swallow the continuation.)
 */
#define DEFINE_CFFI_BITOPS(SUF, TYPE, UTYPE, BITS)                             \
/* c[i] = a[i] & b[i] */                                                       \
void cffi_##SUF##and(const int n, const TYPE* a, const TYPE* b, TYPE* c) {     \
    _Pragma("omp parallel for")                                                \
    for (int i = 0; i < n; i++) {                                              \
        c[i] = (TYPE)(a[i] & b[i]);                                            \
    }                                                                          \
}                                                                              \
/* c[i] = a[i] | b[i] */                                                       \
void cffi_##SUF##or(const int n, const TYPE* a, const TYPE* b, TYPE* c) {      \
    _Pragma("omp parallel for")                                                \
    for (int i = 0; i < n; i++) {                                              \
        c[i] = (TYPE)(a[i] | b[i]);                                            \
    }                                                                          \
}                                                                              \
/* c[i] = a[i] ^ b[i] */                                                       \
void cffi_##SUF##xor(const int n, const TYPE* a, const TYPE* b, TYPE* c) {     \
    _Pragma("omp parallel for")                                                \
    for (int i = 0; i < n; i++) {                                              \
        c[i] = (TYPE)(a[i] ^ b[i]);                                            \
    }                                                                          \
}                                                                              \
/* Copy original[i] but flip its most-significant bit that differs from       \
   perturbed[i] (no change when the values are equal). */                      \
void cffi_##SUF##msbprojection(const int n, const TYPE* original,              \
        const TYPE* perturbed, TYPE* output) {                                 \
    _Pragma("omp parallel for")                                                \
    for (int i = 0; i < n; i++) {                                              \
        UTYPE o = (UTYPE)original[i];                                          \
        UTYPE p = (UTYPE)perturbed[i];                                         \
        output[i] = original[i];                                               \
        for (int bit_idx = BITS - 1; bit_idx >= 0; bit_idx--) {                \
            UTYPE mask = (UTYPE)(1u << bit_idx);                               \
            if ((o & mask) != (p & mask)) {                                    \
                output[i] = (TYPE)(o ^ mask);                                  \
                break;                                                         \
            }                                                                  \
        }                                                                      \
    }                                                                          \
}                                                                              \
/* dist[i] = number of differing bits between a[i] and b[i]. */                \
void cffi_##SUF##hammingdistance(const int n, const TYPE* a, const TYPE* b,    \
        int* dist) {                                                           \
    _Pragma("omp parallel for")                                                \
    for (int i = 0; i < n; i++) {                                              \
        UTYPE x = (UTYPE)(a[i] ^ b[i]);                                        \
        int count = 0;                                                         \
        while (x != 0) {                                                       \
            /* Kernighan: clear the lowest set bit each iteration */           \
            x = (UTYPE)(x & (x - 1u));                                         \
            count++;                                                           \
        }                                                                      \
        dist[i] = count;                                                       \
    }                                                                          \
}                                                                              \
/* Flip bit b of element i wherever mask[BITS*i + b] is true. */               \
void cffi_##SUF##flip(const int n, const bool* mask, const TYPE* input,        \
        TYPE* output) {                                                        \
    _Pragma("omp parallel for")                                                \
    for (int i = 0; i < n; i++) {                                              \
        UTYPE xor_mask = 0;                                                    \
        for (int bit_idx = 0; bit_idx < BITS; bit_idx++) {                     \
            if (mask[BITS * i + bit_idx]) {                                    \
                xor_mask |= (UTYPE)(1u << bit_idx);                            \
            }                                                                  \
        }                                                                      \
        output[i] = (TYPE)((UTYPE)input[i] ^ xor_mask);                        \
    }                                                                          \
}                                                                              \
/* Force bits flagged in set1 to 1 and bits flagged in set0 to 0 (set0 wins   \
   when both are flagged, matching the original or-then-andnot order). */      \
void cffi_##SUF##set(const int n, const bool* set1, const bool* set0,          \
        const TYPE* input, TYPE* output) {                                     \
    _Pragma("omp parallel for")                                                \
    for (int i = 0; i < n; i++) {                                              \
        UTYPE set1_mask = 0;                                                   \
        UTYPE set0_mask = 0;                                                   \
        for (int bit_idx = 0; bit_idx < BITS; bit_idx++) {                     \
            if (set1[BITS * i + bit_idx]) {                                    \
                set1_mask |= (UTYPE)(1u << bit_idx);                           \
            }                                                                  \
            if (set0[BITS * i + bit_idx]) {                                    \
                set0_mask |= (UTYPE)(1u << bit_idx);                           \
            }                                                                  \
        }                                                                      \
        output[i] = (TYPE)(((UTYPE)input[i] | set1_mask) & (UTYPE)~set0_mask); \
    }                                                                          \
}                                                                              \
/* Clear the m least-significant bits of every element (m >= BITS clears      \
   all of them). The mask is loop-invariant, so it is hoisted out of the      \
   parallel loop. */                                                           \
void cffi_##SUF##setzero(const int n, const int m, const TYPE* input,          \
        TYPE* output) {                                                        \
    UTYPE low_mask = 0;                                                        \
    for (int bit_idx = 0; bit_idx < BITS; bit_idx++) {                         \
        if (bit_idx < m) {                                                     \
            low_mask |= (UTYPE)(1u << bit_idx);                                \
        }                                                                      \
    }                                                                          \
    _Pragma("omp parallel for")                                                \
    for (int i = 0; i < n; i++) {                                              \
        output[i] = (TYPE)((UTYPE)input[i] & (UTYPE)~low_mask);                \
    }                                                                          \
}                                                                              \
/* Flip each unprotected bit with scalar probability *one_bit_flip_prob       \
   (bit currently 1) or *zero_bit_flip_prob (bit currently 0); rand_src       \
   supplies one uniform [0,1) draw per bit. */                                 \
void cffi_##SUF##randomflip(const int n, const float* zero_bit_flip_prob,      \
        const float* one_bit_flip_prob, const int* protected_bits,             \
        const int* len_protected_bits, const float* rand_src,                  \
        const TYPE* input, TYPE* output) {                                     \
    (void)len_protected_bits;                                                  \
    _Pragma("omp parallel for")                                                \
    for (int i = 0; i < n; i++) {                                              \
        UTYPE word = (UTYPE)input[i];                                          \
        UTYPE xor_mask = 0;                                                    \
        for (int bit_idx = 0; bit_idx < BITS; bit_idx++) {                     \
            if (protected_bits[bit_idx]) { continue; }                         \
            float p = ((word >> bit_idx) & 1u) ? *one_bit_flip_prob            \
                                               : *zero_bit_flip_prob;          \
            if (rand_src[BITS * i + bit_idx] < p) {                            \
                xor_mask |= (UTYPE)(1u << bit_idx);                            \
            }                                                                  \
        }                                                                      \
        output[i] = (TYPE)(word ^ xor_mask);                                   \
    }                                                                          \
}                                                                              \
/* As randomflip, but elements with mask[i] == false pass through. */          \
void cffi_##SUF##maskedrandomflip(const int n,                                 \
        const float* zero_bit_flip_prob, const float* one_bit_flip_prob,       \
        const int* protected_bits, const int* len_protected_bits,              \
        const bool* mask, const float* rand_src, const TYPE* input,            \
        TYPE* output) {                                                        \
    (void)len_protected_bits;                                                  \
    _Pragma("omp parallel for")                                                \
    for (int i = 0; i < n; i++) {                                              \
        if (!mask[i]) {                                                        \
            output[i] = input[i];                                              \
            continue;                                                          \
        }                                                                      \
        UTYPE word = (UTYPE)input[i];                                          \
        UTYPE xor_mask = 0;                                                    \
        for (int bit_idx = 0; bit_idx < BITS; bit_idx++) {                     \
            if (protected_bits[bit_idx]) { continue; }                         \
            float p = ((word >> bit_idx) & 1u) ? *one_bit_flip_prob            \
                                               : *zero_bit_flip_prob;          \
            if (rand_src[BITS * i + bit_idx] < p) {                            \
                xor_mask |= (UTYPE)(1u << bit_idx);                            \
            }                                                                  \
        }                                                                      \
        output[i] = (TYPE)(word ^ xor_mask);                                   \
    }                                                                          \
}                                                                              \
/* As randomflip, but with a per-bit probability array (BITS entries per      \
   element) instead of a scalar. */                                            \
void cffi_##SUF##individualrandomflip(const int n,                             \
        const float* zero_bit_flip_prob, const float* one_bit_flip_prob,       \
        const int* protected_bits, const int* len_protected_bits,              \
        const float* rand_src, const TYPE* input, TYPE* output) {              \
    (void)len_protected_bits;                                                  \
    _Pragma("omp parallel for")                                                \
    for (int i = 0; i < n; i++) {                                              \
        UTYPE word = (UTYPE)input[i];                                          \
        UTYPE xor_mask = 0;                                                    \
        for (int bit_idx = 0; bit_idx < BITS; bit_idx++) {                     \
            if (protected_bits[bit_idx]) { continue; }                         \
            float p = ((word >> bit_idx) & 1u)                                 \
                          ? one_bit_flip_prob[BITS * i + bit_idx]              \
                          : zero_bit_flip_prob[BITS * i + bit_idx];            \
            if (rand_src[BITS * i + bit_idx] < p) {                            \
                xor_mask |= (UTYPE)(1u << bit_idx);                            \
            }                                                                  \
        }                                                                      \
        output[i] = (TYPE)(word ^ xor_mask);                                   \
    }                                                                          \
}                                                                              \
/* Per-bit probabilities plus a per-element pass-through mask. */              \
void cffi_##SUF##maskedindividualrandomflip(const int n,                       \
        const float* zero_bit_flip_prob, const float* one_bit_flip_prob,       \
        const int* protected_bits, const int* len_protected_bits,              \
        const bool* mask, const float* rand_src, const TYPE* input,            \
        TYPE* output) {                                                        \
    (void)len_protected_bits;                                                  \
    _Pragma("omp parallel for")                                                \
    for (int i = 0; i < n; i++) {                                              \
        if (!mask[i]) {                                                        \
            output[i] = input[i];                                              \
            continue;                                                          \
        }                                                                      \
        UTYPE word = (UTYPE)input[i];                                          \
        UTYPE xor_mask = 0;                                                    \
        for (int bit_idx = 0; bit_idx < BITS; bit_idx++) {                     \
            if (protected_bits[bit_idx]) { continue; }                         \
            float p = ((word >> bit_idx) & 1u)                                 \
                          ? one_bit_flip_prob[BITS * i + bit_idx]              \
                          : zero_bit_flip_prob[BITS * i + bit_idx];            \
            if (rand_src[BITS * i + bit_idx] < p) {                            \
                xor_mask |= (UTYPE)(1u << bit_idx);                            \
            }                                                                  \
        }                                                                      \
        output[i] = (TYPE)(word ^ xor_mask);                                   \
    }                                                                          \
}                                                                              \
/* Explode each element into BITS booleans, LSB first. */                      \
void cffi_##SUF##bits(const int n, const TYPE* input, bool* output) {          \
    _Pragma("omp parallel for")                                                \
    for (int i = 0; i < n; i++) {                                              \
        UTYPE word = (UTYPE)input[i];                                          \
        for (int bit_idx = 0; bit_idx < BITS; bit_idx++) {                     \
            output[BITS * i + bit_idx] = ((word >> bit_idx) & 1u) != 0;        \
        }                                                                      \
    }                                                                          \
}

DEFINE_CFFI_BITOPS(int32, int, uint32_t, 32)
DEFINE_CFFI_BITOPS(int16, short, uint16_t, 16)
DEFINE_CFFI_BITOPS(int8, char, uint8_t, 8)
DEFINE_CFFI_BITOPS(uint8, unsigned char, uint8_t, 8)
eavlSourceTopologySparseMapOp.h
// Copyright 2010-2014 UT-Battelle, LLC. See LICENSE.txt for more information. #ifndef EAVL_SOURCE_TOPOLOGY_SPARSE_MAP_OP_H #define EAVL_SOURCE_TOPOLOGY_SPARSE_MAP_OP_H #include "eavlCUDA.h" #include "eavlCellSet.h" #include "eavlCellSetExplicit.h" #include "eavlCellSetAllStructured.h" #include "eavlDataSet.h" #include "eavlArray.h" #include "eavlOpDispatch.h" #include "eavlOperation.h" #include "eavlTopology.h" #include "eavlException.h" #include <time.h> #ifdef HAVE_OPENMP #include <omp.h> #endif #ifndef DOXYGEN template <class CONN> struct eavlSourceTopologySparseMapOp_CPU { static inline eavlArray::Location location() { return eavlArray::HOST; } template <class F, class IN, class OUT, class INDEX> static void call(int nitems, CONN &conn, const IN s_inputs, OUT outputs, INDEX indices, F &functor) { int *sparseindices = get<0>(indices).array; int ids[MAX_LOCAL_TOPOLOGY_IDS]; // these are effectively our src indices #pragma omp parallel for private(ids) for (int denseindex = 0; denseindex < nitems; ++denseindex) { int sparseindex = sparseindices[get<0>(indices).indexer.index(denseindex)]; int nids; int shapeType = conn.GetElementComponents(sparseindex, nids, ids); typename collecttype<OUT>::type out(collect(sparseindex, outputs)); out = functor(shapeType, nids, ids, s_inputs); } } }; #if defined __CUDACC__ template <class CONN, class F, class IN, class OUT, class INDEX> __global__ void eavlSourceTopologyGatherMapOp_kernel(int nitems, CONN conn, const IN s_inputs, OUT outputs, INDEX indices, F functor) { int *sparseindices = get<0>(indices).array; const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; int ids[MAX_LOCAL_TOPOLOGY_IDS]; for (int denseindex = threadID; denseindex < nitems; denseindex += numThreads) { int sparseindex = sparseindices[get<0>(indices).indexer.index(denseindex)]; int nids; int shapeType = conn.GetElementComponents(sparseindex, nids, ids); collect(sparseindex, outputs) = functor(shapeType, 
nids, ids, s_inputs); } } template <class CONN> struct eavlSourceTopologyGatherMapOp_GPU { static inline eavlArray::Location location() { return eavlArray::DEVICE; } template <class F, class IN, class OUT, class INDEX> static void call(int nitems, CONN &conn, const IN s_inputs, OUT outputs, INDEX indices, F &functor) { int numThreads = 256; dim3 threads(numThreads, 1, 1); dim3 blocks (32, 1, 1); eavlSourceTopologyGatherMapOp_kernel<<< blocks, threads >>>(nitems, conn, s_inputs, outputs, indices, functor); CUDA_CHECK_ERROR(); } }; #endif #endif // **************************************************************************** // Class: eavlSourceTopologySparseMapOp // // Purpose: /// Map from one topological element in a mesh to another, with /// input arrays on the source topology and with outputs on the destination /// topology. All input and output arrays are indexed sparsely as /// specified by the index array. // // Programmer: Jeremy Meredith // Creation: August 1, 2013 // // Modifications: // **************************************************************************** template <class IS, class O, class INDEX, class F> class eavlSourceTopologySparseMapOp : public eavlOperation { protected: eavlCellSet *cells; eavlTopology topology; IS s_inputs; O outputs; INDEX indices; F functor; public: eavlSourceTopologySparseMapOp(eavlCellSet *c, eavlTopology t, IS is, O o, INDEX ind, F f) : cells(c), topology(t), s_inputs(is), outputs(o), indices(ind), functor(f) { } virtual void GoCPU() { eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells); eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells); int n = outputs.first.length(); if (elExp) { eavlExplicitConnectivity &conn = elExp->GetConnectivity(topology); eavlOpDispatch<eavlSourceTopologySparseMapOp_CPU<eavlExplicitConnectivity> >(n, conn, s_inputs, outputs, indices, functor); } else if (elStr) { eavlRegularConnectivity conn = 
eavlRegularConnectivity(elStr->GetRegularStructure(),topology); eavlOpDispatch<eavlSourceTopologySparseMapOp_CPU<eavlRegularConnectivity> >(n, conn, s_inputs, outputs, indices, functor); } } virtual void GoGPU() { #ifdef HAVE_CUDA eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells); eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells); int n = outputs.first.length(); if (elExp) { eavlExplicitConnectivity &conn = elExp->GetConnectivity(topology); conn.shapetype.NeedOnDevice(); conn.connectivity.NeedOnDevice(); conn.mapCellToIndex.NeedOnDevice(); eavlOpDispatch<eavlSourceTopologySparseMapOp_GPU<eavlExplicitConnectivity> >(n, conn, s_inputs, outputs, indices, functor); conn.shapetype.NeedOnHost(); conn.connectivity.NeedOnHost(); conn.mapCellToIndex.NeedOnHost(); } else if (elStr) { eavlRegularConnectivity conn = eavlRegularConnectivity(elStr->GetRegularStructure(),topology); eavlOpDispatch<eavlSourceTopologySparseMapOp_GPU<eavlRegularConnectivity> >(n, conn, s_inputs, outputs, indices, functor); } #else THROW(eavlException,"Executing GPU code without compiling under CUDA compiler."); #endif } }; // helper function for type deduction template <class IS, class O, class INDEX, class F> eavlSourceTopologySparseMapOp<IS,O,INDEX,F> *new_eavlSourceTopologySparseMapOp(eavlCellSet *c, eavlTopology t, IS is, O o, INDEX indices, F f) { return new eavlSourceTopologySparseMapOp<IS,O,INDEX,F>(c,t,is,o,indices,f); } #endif
matvec_int.c
//matvec.c
//Multiplies a matrix by a vector, elementwise per row
//(dest[i][j] = matrix[i][j] * vector[j]), comparing an `omp simd`
//kernel against a plain serial loop and reporting throughput.
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>

#define N_RUNS 1000
#define N 1200

// read timer in second
double read_timer() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time + (double) tm.millitm / 1000.0;
}

// Allocate or abort: the benchmark must never run on NULL rows
// (the original code left every malloc unchecked).
static void *xmalloc(size_t nbytes) {
    void *p = malloc(nbytes);
    if (p == NULL) {
        fprintf(stderr, "allocation of %zu bytes failed\n", nbytes);
        exit(EXIT_FAILURE);
    }
    return p;
}

//Create a matrix and a vector and fill with random numbers (roughly 0..10)
void init(int **matrix, int *vector) {
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            matrix[i][j] = (int)rand() / (int)(RAND_MAX / 10.0);
        }
        vector[i] = (int)rand() / (int)(RAND_MAX / 10.0);
    }
}

//SIMD kernel under test: dest[i][j] = matrix[i][j] * vector[j].
//(The dead accumulator `s` from the original version was removed --
//it was zeroed every row but never read.)
void sum(int **matrix, int *vector, int **dest) {
    for (int i = 0; i < N; i++) {
        #pragma omp simd
        for (int j = 0; j < N; j++) {
            dest[i][j] = matrix[i][j] * vector[j];
        }
    }
}

// Reference implementation used for the correctness check.
void serial(int **matrix, int *vector, int **dest) {
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            dest[i][j] = matrix[i][j] * vector[j];
        }
    }
}

// Debug aid: print the top-left 8x8 corner of a matrix.
void print_matrix(int **matrix) {
    for (int i = 0; i < 8; i++) {
        printf("[");
        for (int j = 0; j < 8; j++) {
            printf("%d ", matrix[i][j]);
        }
        puts("]");
    }
    puts("");
}

// Debug aid: print the first 8 entries of a vector.
void print_vector(int *vector) {
    printf("[");
    for (int i = 0; i < 8; i++) {
        printf("%d ", vector[i]);
    }
    puts("]");
}

//Sum of absolute elementwise differences; 0 means the matrices agree.
//(The original summed signed differences, so +d and -d mismatches could
//cancel and report a bogus 0.)
int check(int **A, int **B) {
    int difference = 0;
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            int d = A[i][j] - B[i][j];
            difference += (d < 0) ? -d : d;
        }
    }
    return difference;
}

int main(int argc, char **argv) {
    //Set everything up
    int **dest_matrix = xmalloc(sizeof(int*) * N);
    int **serial_matrix = xmalloc(sizeof(int*) * N);
    int **matrix = xmalloc(sizeof(int*) * N);
    int *vector = xmalloc(sizeof(int) * N);
    for (int i = 0; i < N; i++) {
        dest_matrix[i] = xmalloc(sizeof(int) * N);
        serial_matrix[i] = xmalloc(sizeof(int) * N);
        matrix[i] = xmalloc(sizeof(int) * N);
    }
    srand(time(NULL));
    init(matrix, vector);

    // time the SIMD kernel
    double start = read_timer();
    for (int i = 0; i < N_RUNS; i++)
        sum(matrix, vector, dest_matrix);
    double t = (read_timer() - start);

    // time the serial reference
    double start_serial = read_timer();
    for (int i = 0; i < N_RUNS; i++)
        serial(matrix, vector, serial_matrix);
    double t_serial = (read_timer() - start_serial);

    print_matrix(matrix);
    print_vector(vector);
    puts("=\n");
    print_matrix(dest_matrix);
    puts("---------------------------------");
    print_matrix(serial_matrix);

    double gflops = ((2.0 * N) * N * N_RUNS) / (1.0e9 * t);
    double gflops_serial = ((2.0 * N) * N * N_RUNS) / (1.0e9 * t_serial);
    printf("==================================================================\n");
    printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------\n");
    printf("Matrix-vector (SIMD):\t\t%4f\t%4f\n", t, gflops);
    printf("Matrix-vector (Serial):\t\t%4f\t%4f\n", t_serial, gflops_serial);
    printf("Correctness check: %d\n", check(dest_matrix,serial_matrix));

    //Release row storage before the spine arrays (the original leaked
    //every row).
    for (int i = 0; i < N; i++) {
        free(dest_matrix[i]);
        free(serial_matrix[i]);
        free(matrix[i]);
    }
    free(dest_matrix);
    free(serial_matrix);
    free(matrix);
    free(vector);
    return 0;
}
convolution_sgemm_pack1ton_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Packed fp16 sgemm over an im2col buffer: for each output channel p, each
// output pixel i accumulates nn = inch*maxk scalar*vector products into a
// packn-wide fp16 accumulator, seeded from the bias when present.
static void im2col_sgemm_pack1ton_fp16sa_rvv(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // packn = fp16 lanes per m1 vector register (vlenb bytes / 2 bytes each)
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    // Mat bottom_im2col(size, maxk, inch, 2u, 1, opt.workspace_allocator);

    const int size = bottom_im2col.w;
    const int maxk = bottom_im2col.h;
    const int inch = bottom_im2col.c;

    const int outch = top_blob.c;

    const __fp16* bias = _bias;

    // permute: reorder im2col data so each output pixel i owns a contiguous
    // (maxk x inch) slice, giving the sgemm loop unit-stride reads
    Mat tmp;
    tmp.create(maxk, inch, size, 2u, 1, opt.workspace_allocator);
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = 0; i < size; i++)
        {
            __fp16* tmpptr = tmp.channel(i);

            for (int q = 0; q < inch; q++)
            {
                const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
                    // gather column i of every (q, k) row of the im2col buffer
                    tmpptr[0] = img0[0];
                    img0 += size;
                    tmpptr += 1;
                }
            }
        }
    }

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr0 = top_blob.channel(p);

        int i = 0;
        for (; i < size; i++)
        {
            const __fp16* tmpptr = tmp.channel(i);
            const __fp16* kptr0 = kernel.channel(p);

            int nn = inch * maxk; // inch always > 0

            // accumulator starts at zero, or at the bias vector for this
            // channel group (bias is read packn-wide at p*packn, which
            // implies packn outputs per top_blob channel -- layout set up
            // by the caller's weight/bias packing; TODO confirm)
            vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

            if (bias)
            {
                _sum = vle16_v_f16m1(bias + p * packn, vl);
            }

            for (int j = 0; j < nn; j++)
            {
                // broadcast one input scalar against a packn-wide weight row
                __fp16 val = *tmpptr++;
                vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl);
                _sum = vfmacc_vf_f16m1(_sum, val, _w0, vl);
                kptr0 += packn;
            }

            vse16_v_f16m1(outptr0, _sum, vl);
            outptr0 += packn;
        }
    }
}

// Entry point: expand bottom_blob into an im2col buffer (one (outw*outh)
// column per kernel tap per input channel), then run the packed sgemm above.
static void convolution_im2col_sgemm_pack1ton_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int size = outw * outh;
    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 2u, 1, opt.workspace_allocator);
    {
        // bytes-free gap between the end of one output row's samples and the
        // start of the next input row to sample from
        const int gap = w * stride_h - outw * stride_w;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            __fp16* ptr = bottom_im2col.channel(p);

            // one im2col row per kernel tap (u, v), dilation applied to the
            // starting offset, stride applied while walking output pixels
            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const __fp16* sptr = img.row<const __fp16>(dilation_h * u) + dilation_w * v;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            ptr[0] = sptr[0];
                            sptr += stride_w;
                            ptr += 1;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack1ton_fp16sa_rvv(bottom_im2col, top_blob, kernel, _bias, opt);
}
convolution_7x7_pack1ton.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv7x7s2_pack1ton_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { const int packn = csrr_vlenb() / 4; const word_type vl = vsetvl_e32m1(packn); int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out0 = top_blob.channel(p); vfloat32m1_t _bias0 = bias ? 
vle32_v_f32m1(bias + p * packn, vl) : vfmv_v_f_f32m1(0.f, vl); out0.fill(_bias0); for (int q = 0; q < inch; q++) { float* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); const float* r3 = img0.row(3); const float* r4 = img0.row(4); const float* r5 = img0.row(5); const float* r6 = img0.row(6); const float* kptr = kernel.channel(p).row(q); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 7 < outw; j += 8) { vfloat32m1_t _sum0 = vle32_v_f32m1(outptr0, vl); vfloat32m1_t _sum1 = vle32_v_f32m1(outptr0 + packn, vl); vfloat32m1_t _sum2 = vle32_v_f32m1(outptr0 + packn * 2, vl); vfloat32m1_t _sum3 = vle32_v_f32m1(outptr0 + packn * 3, vl); vfloat32m1_t _sum4 = vle32_v_f32m1(outptr0 + packn * 4, vl); vfloat32m1_t _sum5 = vle32_v_f32m1(outptr0 + packn * 5, vl); vfloat32m1_t _sum6 = vle32_v_f32m1(outptr0 + packn * 6, vl); vfloat32m1_t _sum7 = vle32_v_f32m1(outptr0 + packn * 7, vl); vfloat32m1_t _k00 = vle32_v_f32m1(kptr, vl); vfloat32m1_t _k01 = vle32_v_f32m1(kptr + packn, vl); vfloat32m1_t _k02 = vle32_v_f32m1(kptr + packn * 2, vl); vfloat32m1_t _k03 = vle32_v_f32m1(kptr + packn * 3, vl); vfloat32m1_t _k04 = vle32_v_f32m1(kptr + packn * 4, vl); vfloat32m1_t _k05 = vle32_v_f32m1(kptr + packn * 5, vl); vfloat32m1_t _k06 = vle32_v_f32m1(kptr + packn * 6, vl); kptr += packn * 7; _sum0 = vfmacc_vf_f32m1(_sum0, r0[0], _k00, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[2], _k00, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[4], _k00, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[6], _k00, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r0[8], _k00, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r0[10], _k00, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r0[12], _k00, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r0[14], _k00, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[1], _k01, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[3], _k01, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[5], _k01, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[7], _k01, vl); _sum4 = 
vfmacc_vf_f32m1(_sum4, r0[9], _k01, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r0[11], _k01, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r0[13], _k01, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r0[15], _k01, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[2], _k02, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[4], _k02, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[6], _k02, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[8], _k02, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r0[10], _k02, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r0[12], _k02, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r0[14], _k02, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r0[16], _k02, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[3], _k03, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[5], _k03, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[7], _k03, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[9], _k03, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r0[11], _k03, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r0[13], _k03, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r0[15], _k03, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r0[17], _k03, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[4], _k04, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[6], _k04, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[8], _k04, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[10], _k04, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r0[12], _k04, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r0[14], _k04, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r0[16], _k04, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r0[18], _k04, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[5], _k05, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[7], _k05, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[9], _k05, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[11], _k05, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r0[13], _k05, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r0[15], _k05, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r0[17], _k05, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r0[19], _k05, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[6], _k06, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[8], _k06, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[10], _k06, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[12], _k06, vl); _sum4 = 
vfmacc_vf_f32m1(_sum4, r0[14], _k06, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r0[16], _k06, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r0[18], _k06, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r0[20], _k06, vl); vfloat32m1_t _k10 = vle32_v_f32m1(kptr, vl); vfloat32m1_t _k11 = vle32_v_f32m1(kptr + packn, vl); vfloat32m1_t _k12 = vle32_v_f32m1(kptr + packn * 2, vl); vfloat32m1_t _k13 = vle32_v_f32m1(kptr + packn * 3, vl); vfloat32m1_t _k14 = vle32_v_f32m1(kptr + packn * 4, vl); vfloat32m1_t _k15 = vle32_v_f32m1(kptr + packn * 5, vl); vfloat32m1_t _k16 = vle32_v_f32m1(kptr + packn * 6, vl); kptr += packn * 7; _sum0 = vfmacc_vf_f32m1(_sum0, r1[0], _k10, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[2], _k10, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[4], _k10, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[6], _k10, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r1[8], _k10, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r1[10], _k10, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r1[12], _k10, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r1[14], _k10, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[1], _k11, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[3], _k11, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[5], _k11, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[7], _k11, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r1[9], _k11, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r1[11], _k11, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r1[13], _k11, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r1[15], _k11, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[2], _k12, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[4], _k12, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[6], _k12, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[8], _k12, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r1[10], _k12, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r1[12], _k12, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r1[14], _k12, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r1[16], _k12, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[3], _k13, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[5], _k13, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[7], _k13, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[9], _k13, vl); _sum4 = 
vfmacc_vf_f32m1(_sum4, r1[11], _k13, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r1[13], _k13, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r1[15], _k13, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r1[17], _k13, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[4], _k14, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[6], _k14, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[8], _k14, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[10], _k14, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r1[12], _k14, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r1[14], _k14, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r1[16], _k14, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r1[18], _k14, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[5], _k15, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[7], _k15, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[9], _k15, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[11], _k15, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r1[13], _k15, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r1[15], _k15, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r1[17], _k15, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r1[19], _k15, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[6], _k16, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[8], _k16, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[10], _k16, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[12], _k16, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r1[14], _k16, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r1[16], _k16, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r1[18], _k16, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r1[20], _k16, vl); vfloat32m1_t _k20 = vle32_v_f32m1(kptr, vl); vfloat32m1_t _k21 = vle32_v_f32m1(kptr + packn, vl); vfloat32m1_t _k22 = vle32_v_f32m1(kptr + packn * 2, vl); vfloat32m1_t _k23 = vle32_v_f32m1(kptr + packn * 3, vl); vfloat32m1_t _k24 = vle32_v_f32m1(kptr + packn * 4, vl); vfloat32m1_t _k25 = vle32_v_f32m1(kptr + packn * 5, vl); vfloat32m1_t _k26 = vle32_v_f32m1(kptr + packn * 6, vl); kptr += packn * 7; _sum0 = vfmacc_vf_f32m1(_sum0, r2[0], _k20, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[2], _k20, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[4], _k20, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[6], _k20, vl); _sum4 = 
vfmacc_vf_f32m1(_sum4, r2[8], _k20, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r2[10], _k20, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r2[12], _k20, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r2[14], _k20, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[1], _k21, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[3], _k21, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[5], _k21, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[7], _k21, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r2[9], _k21, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r2[11], _k21, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r2[13], _k21, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r2[15], _k21, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[2], _k22, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[4], _k22, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[6], _k22, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[8], _k22, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r2[10], _k22, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r2[12], _k22, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r2[14], _k22, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r2[16], _k22, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[3], _k23, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[5], _k23, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[7], _k23, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[9], _k23, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r2[11], _k23, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r2[13], _k23, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r2[15], _k23, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r2[17], _k23, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[4], _k24, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[6], _k24, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[8], _k24, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[10], _k24, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r2[12], _k24, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r2[14], _k24, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r2[16], _k24, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r2[18], _k24, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[5], _k25, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[7], _k25, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[9], _k25, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[11], _k25, vl); _sum4 = 
vfmacc_vf_f32m1(_sum4, r2[13], _k25, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r2[15], _k25, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r2[17], _k25, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r2[19], _k25, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[6], _k26, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[8], _k26, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[10], _k26, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[12], _k26, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r2[14], _k26, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r2[16], _k26, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r2[18], _k26, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r2[20], _k26, vl); vfloat32m1_t _k30 = vle32_v_f32m1(kptr, vl); vfloat32m1_t _k31 = vle32_v_f32m1(kptr + packn, vl); vfloat32m1_t _k32 = vle32_v_f32m1(kptr + packn * 2, vl); vfloat32m1_t _k33 = vle32_v_f32m1(kptr + packn * 3, vl); vfloat32m1_t _k34 = vle32_v_f32m1(kptr + packn * 4, vl); vfloat32m1_t _k35 = vle32_v_f32m1(kptr + packn * 5, vl); vfloat32m1_t _k36 = vle32_v_f32m1(kptr + packn * 6, vl); kptr += packn * 7; _sum0 = vfmacc_vf_f32m1(_sum0, r3[0], _k30, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r3[2], _k30, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r3[4], _k30, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r3[6], _k30, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r3[8], _k30, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r3[10], _k30, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r3[12], _k30, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r3[14], _k30, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r3[1], _k31, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r3[3], _k31, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r3[5], _k31, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r3[7], _k31, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r3[9], _k31, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r3[11], _k31, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r3[13], _k31, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r3[15], _k31, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r3[2], _k32, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r3[4], _k32, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r3[6], _k32, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r3[8], _k32, vl); _sum4 = 
vfmacc_vf_f32m1(_sum4, r3[10], _k32, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r3[12], _k32, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r3[14], _k32, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r3[16], _k32, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r3[3], _k33, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r3[5], _k33, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r3[7], _k33, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r3[9], _k33, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r3[11], _k33, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r3[13], _k33, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r3[15], _k33, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r3[17], _k33, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r3[4], _k34, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r3[6], _k34, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r3[8], _k34, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r3[10], _k34, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r3[12], _k34, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r3[14], _k34, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r3[16], _k34, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r3[18], _k34, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r3[5], _k35, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r3[7], _k35, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r3[9], _k35, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r3[11], _k35, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r3[13], _k35, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r3[15], _k35, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r3[17], _k35, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r3[19], _k35, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r3[6], _k36, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r3[8], _k36, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r3[10], _k36, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r3[12], _k36, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r3[14], _k36, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r3[16], _k36, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r3[18], _k36, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r3[20], _k36, vl); vfloat32m1_t _k40 = vle32_v_f32m1(kptr, vl); vfloat32m1_t _k41 = vle32_v_f32m1(kptr + packn, vl); vfloat32m1_t _k42 = vle32_v_f32m1(kptr + packn * 2, vl); vfloat32m1_t _k43 = vle32_v_f32m1(kptr + packn * 3, vl); 
vfloat32m1_t _k44 = vle32_v_f32m1(kptr + packn * 4, vl); vfloat32m1_t _k45 = vle32_v_f32m1(kptr + packn * 5, vl); vfloat32m1_t _k46 = vle32_v_f32m1(kptr + packn * 6, vl); kptr += packn * 7; _sum0 = vfmacc_vf_f32m1(_sum0, r4[0], _k40, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r4[2], _k40, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r4[4], _k40, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r4[6], _k40, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r4[8], _k40, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r4[10], _k40, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r4[12], _k40, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r4[14], _k40, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r4[1], _k41, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r4[3], _k41, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r4[5], _k41, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r4[7], _k41, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r4[9], _k41, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r4[11], _k41, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r4[13], _k41, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r4[15], _k41, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r4[2], _k42, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r4[4], _k42, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r4[6], _k42, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r4[8], _k42, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r4[10], _k42, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r4[12], _k42, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r4[14], _k42, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r4[16], _k42, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r4[3], _k43, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r4[5], _k43, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r4[7], _k43, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r4[9], _k43, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r4[11], _k43, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r4[13], _k43, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r4[15], _k43, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r4[17], _k43, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r4[4], _k44, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r4[6], _k44, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r4[8], _k44, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r4[10], _k44, vl); _sum4 = vfmacc_vf_f32m1(_sum4, 
r4[12], _k44, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r4[14], _k44, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r4[16], _k44, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r4[18], _k44, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r4[5], _k45, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r4[7], _k45, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r4[9], _k45, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r4[11], _k45, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r4[13], _k45, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r4[15], _k45, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r4[17], _k45, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r4[19], _k45, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r4[6], _k46, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r4[8], _k46, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r4[10], _k46, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r4[12], _k46, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r4[14], _k46, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r4[16], _k46, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r4[18], _k46, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r4[20], _k46, vl); vfloat32m1_t _k50 = vle32_v_f32m1(kptr, vl); vfloat32m1_t _k51 = vle32_v_f32m1(kptr + packn, vl); vfloat32m1_t _k52 = vle32_v_f32m1(kptr + packn * 2, vl); vfloat32m1_t _k53 = vle32_v_f32m1(kptr + packn * 3, vl); vfloat32m1_t _k54 = vle32_v_f32m1(kptr + packn * 4, vl); vfloat32m1_t _k55 = vle32_v_f32m1(kptr + packn * 5, vl); vfloat32m1_t _k56 = vle32_v_f32m1(kptr + packn * 6, vl); kptr += packn * 7; _sum0 = vfmacc_vf_f32m1(_sum0, r5[0], _k50, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r5[2], _k50, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r5[4], _k50, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r5[6], _k50, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r5[8], _k50, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r5[10], _k50, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r5[12], _k50, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r5[14], _k50, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r5[1], _k51, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r5[3], _k51, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r5[5], _k51, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r5[7], _k51, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r5[9], 
_k51, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r5[11], _k51, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r5[13], _k51, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r5[15], _k51, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r5[2], _k52, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r5[4], _k52, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r5[6], _k52, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r5[8], _k52, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r5[10], _k52, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r5[12], _k52, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r5[14], _k52, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r5[16], _k52, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r5[3], _k53, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r5[5], _k53, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r5[7], _k53, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r5[9], _k53, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r5[11], _k53, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r5[13], _k53, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r5[15], _k53, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r5[17], _k53, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r5[4], _k54, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r5[6], _k54, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r5[8], _k54, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r5[10], _k54, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r5[12], _k54, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r5[14], _k54, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r5[16], _k54, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r5[18], _k54, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r5[5], _k55, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r5[7], _k55, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r5[9], _k55, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r5[11], _k55, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r5[13], _k55, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r5[15], _k55, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r5[17], _k55, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r5[19], _k55, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r5[6], _k56, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r5[8], _k56, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r5[10], _k56, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r5[12], _k56, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r5[14], _k56, vl); 
_sum5 = vfmacc_vf_f32m1(_sum5, r5[16], _k56, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r5[18], _k56, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r5[20], _k56, vl); vfloat32m1_t _k60 = vle32_v_f32m1(kptr, vl); vfloat32m1_t _k61 = vle32_v_f32m1(kptr + packn, vl); vfloat32m1_t _k62 = vle32_v_f32m1(kptr + packn * 2, vl); vfloat32m1_t _k63 = vle32_v_f32m1(kptr + packn * 3, vl); vfloat32m1_t _k64 = vle32_v_f32m1(kptr + packn * 4, vl); vfloat32m1_t _k65 = vle32_v_f32m1(kptr + packn * 5, vl); vfloat32m1_t _k66 = vle32_v_f32m1(kptr + packn * 6, vl); kptr -= packn * 42; _sum0 = vfmacc_vf_f32m1(_sum0, r6[0], _k60, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r6[2], _k60, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r6[4], _k60, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r6[6], _k60, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r6[8], _k60, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r6[10], _k60, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r6[12], _k60, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r6[14], _k60, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r6[1], _k61, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r6[3], _k61, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r6[5], _k61, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r6[7], _k61, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r6[9], _k61, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r6[11], _k61, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r6[13], _k61, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r6[15], _k61, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r6[2], _k62, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r6[4], _k62, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r6[6], _k62, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r6[8], _k62, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r6[10], _k62, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r6[12], _k62, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r6[14], _k62, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r6[16], _k62, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r6[3], _k63, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r6[5], _k63, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r6[7], _k63, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r6[9], _k63, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r6[11], _k63, vl); _sum5 = 
vfmacc_vf_f32m1(_sum5, r6[13], _k63, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r6[15], _k63, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r6[17], _k63, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r6[4], _k64, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r6[6], _k64, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r6[8], _k64, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r6[10], _k64, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r6[12], _k64, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r6[14], _k64, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r6[16], _k64, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r6[18], _k64, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r6[5], _k65, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r6[7], _k65, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r6[9], _k65, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r6[11], _k65, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r6[13], _k65, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r6[15], _k65, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r6[17], _k65, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r6[19], _k65, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r6[6], _k66, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r6[8], _k66, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r6[10], _k66, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r6[12], _k66, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r6[14], _k66, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r6[16], _k66, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r6[18], _k66, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r6[20], _k66, vl); vse32_v_f32m1(outptr0, _sum0, vl); vse32_v_f32m1(outptr0 + packn, _sum1, vl); vse32_v_f32m1(outptr0 + packn * 2, _sum2, vl); vse32_v_f32m1(outptr0 + packn * 3, _sum3, vl); vse32_v_f32m1(outptr0 + packn * 4, _sum4, vl); vse32_v_f32m1(outptr0 + packn * 5, _sum5, vl); vse32_v_f32m1(outptr0 + packn * 6, _sum6, vl); vse32_v_f32m1(outptr0 + packn * 7, _sum7, vl); outptr0 += packn * 8; r0 += 16; r1 += 16; r2 += 16; r3 += 16; r4 += 16; r5 += 16; r6 += 16; } for (; j + 3 < outw; j += 4) { vfloat32m1_t _sum0 = vle32_v_f32m1(outptr0, vl); vfloat32m1_t _sum1 = vle32_v_f32m1(outptr0 + packn, vl); vfloat32m1_t _sum2 = vle32_v_f32m1(outptr0 + packn * 2, vl); vfloat32m1_t 
_sum3 = vle32_v_f32m1(outptr0 + packn * 3, vl); vfloat32m1_t _k00 = vle32_v_f32m1(kptr, vl); vfloat32m1_t _k01 = vle32_v_f32m1(kptr + packn, vl); vfloat32m1_t _k02 = vle32_v_f32m1(kptr + packn * 2, vl); vfloat32m1_t _k03 = vle32_v_f32m1(kptr + packn * 3, vl); vfloat32m1_t _k04 = vle32_v_f32m1(kptr + packn * 4, vl); vfloat32m1_t _k05 = vle32_v_f32m1(kptr + packn * 5, vl); vfloat32m1_t _k06 = vle32_v_f32m1(kptr + packn * 6, vl); kptr += packn * 7; _sum0 = vfmacc_vf_f32m1(_sum0, r0[0], _k00, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[2], _k00, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[4], _k00, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[6], _k00, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[1], _k01, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[3], _k01, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[5], _k01, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[7], _k01, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[2], _k02, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[4], _k02, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[6], _k02, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[8], _k02, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[3], _k03, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[5], _k03, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[7], _k03, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[9], _k03, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[4], _k04, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[6], _k04, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[8], _k04, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[10], _k04, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[5], _k05, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[7], _k05, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[9], _k05, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[11], _k05, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[6], _k06, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[8], _k06, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[10], _k06, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[12], _k06, vl); vfloat32m1_t _k10 = vle32_v_f32m1(kptr, vl); vfloat32m1_t _k11 = vle32_v_f32m1(kptr + packn, vl); vfloat32m1_t _k12 = vle32_v_f32m1(kptr + packn * 2, vl); vfloat32m1_t _k13 
= vle32_v_f32m1(kptr + packn * 3, vl); vfloat32m1_t _k14 = vle32_v_f32m1(kptr + packn * 4, vl); vfloat32m1_t _k15 = vle32_v_f32m1(kptr + packn * 5, vl); vfloat32m1_t _k16 = vle32_v_f32m1(kptr + packn * 6, vl); kptr += packn * 7; _sum0 = vfmacc_vf_f32m1(_sum0, r1[0], _k10, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[2], _k10, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[4], _k10, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[6], _k10, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[1], _k11, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[3], _k11, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[5], _k11, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[7], _k11, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[2], _k12, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[4], _k12, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[6], _k12, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[8], _k12, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[3], _k13, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[5], _k13, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[7], _k13, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[9], _k13, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[4], _k14, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[6], _k14, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[8], _k14, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[10], _k14, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[5], _k15, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[7], _k15, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[9], _k15, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[11], _k15, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[6], _k16, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[8], _k16, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[10], _k16, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[12], _k16, vl); vfloat32m1_t _k20 = vle32_v_f32m1(kptr, vl); vfloat32m1_t _k21 = vle32_v_f32m1(kptr + packn, vl); vfloat32m1_t _k22 = vle32_v_f32m1(kptr + packn * 2, vl); vfloat32m1_t _k23 = vle32_v_f32m1(kptr + packn * 3, vl); vfloat32m1_t _k24 = vle32_v_f32m1(kptr + packn * 4, vl); vfloat32m1_t _k25 = vle32_v_f32m1(kptr + packn * 5, vl); vfloat32m1_t _k26 = vle32_v_f32m1(kptr + packn * 6, vl); kptr += 
packn * 7; _sum0 = vfmacc_vf_f32m1(_sum0, r2[0], _k20, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[2], _k20, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[4], _k20, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[6], _k20, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[1], _k21, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[3], _k21, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[5], _k21, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[7], _k21, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[2], _k22, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[4], _k22, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[6], _k22, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[8], _k22, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[3], _k23, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[5], _k23, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[7], _k23, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[9], _k23, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[4], _k24, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[6], _k24, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[8], _k24, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[10], _k24, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[5], _k25, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[7], _k25, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[9], _k25, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[11], _k25, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[6], _k26, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[8], _k26, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[10], _k26, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[12], _k26, vl); vfloat32m1_t _k30 = vle32_v_f32m1(kptr, vl); vfloat32m1_t _k31 = vle32_v_f32m1(kptr + packn, vl); vfloat32m1_t _k32 = vle32_v_f32m1(kptr + packn * 2, vl); vfloat32m1_t _k33 = vle32_v_f32m1(kptr + packn * 3, vl); vfloat32m1_t _k34 = vle32_v_f32m1(kptr + packn * 4, vl); vfloat32m1_t _k35 = vle32_v_f32m1(kptr + packn * 5, vl); vfloat32m1_t _k36 = vle32_v_f32m1(kptr + packn * 6, vl); kptr += packn * 7; _sum0 = vfmacc_vf_f32m1(_sum0, r3[0], _k30, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r3[2], _k30, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r3[4], _k30, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r3[6], _k30, vl); _sum0 = 
vfmacc_vf_f32m1(_sum0, r3[1], _k31, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r3[3], _k31, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r3[5], _k31, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r3[7], _k31, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r3[2], _k32, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r3[4], _k32, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r3[6], _k32, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r3[8], _k32, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r3[3], _k33, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r3[5], _k33, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r3[7], _k33, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r3[9], _k33, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r3[4], _k34, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r3[6], _k34, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r3[8], _k34, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r3[10], _k34, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r3[5], _k35, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r3[7], _k35, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r3[9], _k35, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r3[11], _k35, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r3[6], _k36, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r3[8], _k36, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r3[10], _k36, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r3[12], _k36, vl); vfloat32m1_t _k40 = vle32_v_f32m1(kptr, vl); vfloat32m1_t _k41 = vle32_v_f32m1(kptr + packn, vl); vfloat32m1_t _k42 = vle32_v_f32m1(kptr + packn * 2, vl); vfloat32m1_t _k43 = vle32_v_f32m1(kptr + packn * 3, vl); vfloat32m1_t _k44 = vle32_v_f32m1(kptr + packn * 4, vl); vfloat32m1_t _k45 = vle32_v_f32m1(kptr + packn * 5, vl); vfloat32m1_t _k46 = vle32_v_f32m1(kptr + packn * 6, vl); kptr += packn * 7; _sum0 = vfmacc_vf_f32m1(_sum0, r4[0], _k40, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r4[2], _k40, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r4[4], _k40, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r4[6], _k40, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r4[1], _k41, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r4[3], _k41, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r4[5], _k41, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r4[7], _k41, vl); _sum0 = vfmacc_vf_f32m1(_sum0, 
r4[2], _k42, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r4[4], _k42, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r4[6], _k42, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r4[8], _k42, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r4[3], _k43, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r4[5], _k43, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r4[7], _k43, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r4[9], _k43, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r4[4], _k44, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r4[6], _k44, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r4[8], _k44, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r4[10], _k44, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r4[5], _k45, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r4[7], _k45, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r4[9], _k45, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r4[11], _k45, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r4[6], _k46, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r4[8], _k46, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r4[10], _k46, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r4[12], _k46, vl); vfloat32m1_t _k50 = vle32_v_f32m1(kptr, vl); vfloat32m1_t _k51 = vle32_v_f32m1(kptr + packn, vl); vfloat32m1_t _k52 = vle32_v_f32m1(kptr + packn * 2, vl); vfloat32m1_t _k53 = vle32_v_f32m1(kptr + packn * 3, vl); vfloat32m1_t _k54 = vle32_v_f32m1(kptr + packn * 4, vl); vfloat32m1_t _k55 = vle32_v_f32m1(kptr + packn * 5, vl); vfloat32m1_t _k56 = vle32_v_f32m1(kptr + packn * 6, vl); kptr += packn * 7; _sum0 = vfmacc_vf_f32m1(_sum0, r5[0], _k50, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r5[2], _k50, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r5[4], _k50, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r5[6], _k50, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r5[1], _k51, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r5[3], _k51, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r5[5], _k51, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r5[7], _k51, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r5[2], _k52, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r5[4], _k52, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r5[6], _k52, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r5[8], _k52, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r5[3], _k53, vl); _sum1 = 
vfmacc_vf_f32m1(_sum1, r5[5], _k53, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r5[7], _k53, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r5[9], _k53, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r5[4], _k54, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r5[6], _k54, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r5[8], _k54, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r5[10], _k54, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r5[5], _k55, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r5[7], _k55, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r5[9], _k55, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r5[11], _k55, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r5[6], _k56, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r5[8], _k56, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r5[10], _k56, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r5[12], _k56, vl); vfloat32m1_t _k60 = vle32_v_f32m1(kptr, vl); vfloat32m1_t _k61 = vle32_v_f32m1(kptr + packn, vl); vfloat32m1_t _k62 = vle32_v_f32m1(kptr + packn * 2, vl); vfloat32m1_t _k63 = vle32_v_f32m1(kptr + packn * 3, vl); vfloat32m1_t _k64 = vle32_v_f32m1(kptr + packn * 4, vl); vfloat32m1_t _k65 = vle32_v_f32m1(kptr + packn * 5, vl); vfloat32m1_t _k66 = vle32_v_f32m1(kptr + packn * 6, vl); kptr -= packn * 42; _sum0 = vfmacc_vf_f32m1(_sum0, r6[0], _k60, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r6[2], _k60, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r6[4], _k60, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r6[6], _k60, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r6[1], _k61, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r6[3], _k61, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r6[5], _k61, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r6[7], _k61, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r6[2], _k62, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r6[4], _k62, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r6[6], _k62, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r6[8], _k62, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r6[3], _k63, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r6[5], _k63, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r6[7], _k63, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r6[9], _k63, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r6[4], _k64, vl); _sum1 = vfmacc_vf_f32m1(_sum1, 
r6[6], _k64, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r6[8], _k64, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r6[10], _k64, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r6[5], _k65, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r6[7], _k65, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r6[9], _k65, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r6[11], _k65, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r6[6], _k66, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r6[8], _k66, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r6[10], _k66, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r6[12], _k66, vl); vse32_v_f32m1(outptr0, _sum0, vl); vse32_v_f32m1(outptr0 + packn, _sum1, vl); vse32_v_f32m1(outptr0 + packn * 2, _sum2, vl); vse32_v_f32m1(outptr0 + packn * 3, _sum3, vl); outptr0 += packn * 4; r0 += 8; r1 += 8; r2 += 8; r3 += 8; r4 += 8; r5 += 8; r6 += 8; } for (; j < outw; j++) { vfloat32m1_t _sum0 = vle32_v_f32m1(outptr0, vl); vfloat32m1_t _k00 = vle32_v_f32m1(kptr, vl); vfloat32m1_t _k01 = vle32_v_f32m1(kptr + packn, vl); vfloat32m1_t _k02 = vle32_v_f32m1(kptr + packn * 2, vl); vfloat32m1_t _k03 = vle32_v_f32m1(kptr + packn * 3, vl); vfloat32m1_t _k04 = vle32_v_f32m1(kptr + packn * 4, vl); vfloat32m1_t _k05 = vle32_v_f32m1(kptr + packn * 5, vl); vfloat32m1_t _k06 = vle32_v_f32m1(kptr + packn * 6, vl); kptr += packn * 7; _sum0 = vfmacc_vf_f32m1(_sum0, r0[0], _k00, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[1], _k01, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[2], _k02, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[3], _k03, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[4], _k04, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[5], _k05, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[6], _k06, vl); vfloat32m1_t _k10 = vle32_v_f32m1(kptr, vl); vfloat32m1_t _k11 = vle32_v_f32m1(kptr + packn, vl); vfloat32m1_t _k12 = vle32_v_f32m1(kptr + packn * 2, vl); vfloat32m1_t _k13 = vle32_v_f32m1(kptr + packn * 3, vl); vfloat32m1_t _k14 = vle32_v_f32m1(kptr + packn * 4, vl); vfloat32m1_t _k15 = vle32_v_f32m1(kptr + packn * 5, vl); vfloat32m1_t _k16 = vle32_v_f32m1(kptr + packn * 6, vl); kptr += packn * 7; _sum0 = 
vfmacc_vf_f32m1(_sum0, r1[0], _k10, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[1], _k11, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[2], _k12, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[3], _k13, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[4], _k14, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[5], _k15, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[6], _k16, vl); vfloat32m1_t _k20 = vle32_v_f32m1(kptr, vl); vfloat32m1_t _k21 = vle32_v_f32m1(kptr + packn, vl); vfloat32m1_t _k22 = vle32_v_f32m1(kptr + packn * 2, vl); vfloat32m1_t _k23 = vle32_v_f32m1(kptr + packn * 3, vl); vfloat32m1_t _k24 = vle32_v_f32m1(kptr + packn * 4, vl); vfloat32m1_t _k25 = vle32_v_f32m1(kptr + packn * 5, vl); vfloat32m1_t _k26 = vle32_v_f32m1(kptr + packn * 6, vl); kptr += packn * 7; _sum0 = vfmacc_vf_f32m1(_sum0, r2[0], _k20, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[1], _k21, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[2], _k22, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[3], _k23, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[4], _k24, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[5], _k25, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[6], _k26, vl); vfloat32m1_t _k30 = vle32_v_f32m1(kptr, vl); vfloat32m1_t _k31 = vle32_v_f32m1(kptr + packn, vl); vfloat32m1_t _k32 = vle32_v_f32m1(kptr + packn * 2, vl); vfloat32m1_t _k33 = vle32_v_f32m1(kptr + packn * 3, vl); vfloat32m1_t _k34 = vle32_v_f32m1(kptr + packn * 4, vl); vfloat32m1_t _k35 = vle32_v_f32m1(kptr + packn * 5, vl); vfloat32m1_t _k36 = vle32_v_f32m1(kptr + packn * 6, vl); kptr += packn * 7; _sum0 = vfmacc_vf_f32m1(_sum0, r3[0], _k30, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r3[1], _k31, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r3[2], _k32, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r3[3], _k33, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r3[4], _k34, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r3[5], _k35, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r3[6], _k36, vl); vfloat32m1_t _k40 = vle32_v_f32m1(kptr, vl); vfloat32m1_t _k41 = vle32_v_f32m1(kptr + packn, vl); vfloat32m1_t _k42 = vle32_v_f32m1(kptr + packn * 2, vl); vfloat32m1_t _k43 = 
vle32_v_f32m1(kptr + packn * 3, vl); vfloat32m1_t _k44 = vle32_v_f32m1(kptr + packn * 4, vl); vfloat32m1_t _k45 = vle32_v_f32m1(kptr + packn * 5, vl); vfloat32m1_t _k46 = vle32_v_f32m1(kptr + packn * 6, vl); kptr += packn * 7; _sum0 = vfmacc_vf_f32m1(_sum0, r4[0], _k40, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r4[1], _k41, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r4[2], _k42, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r4[3], _k43, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r4[4], _k44, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r4[5], _k45, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r4[6], _k46, vl); vfloat32m1_t _k50 = vle32_v_f32m1(kptr, vl); vfloat32m1_t _k51 = vle32_v_f32m1(kptr + packn, vl); vfloat32m1_t _k52 = vle32_v_f32m1(kptr + packn * 2, vl); vfloat32m1_t _k53 = vle32_v_f32m1(kptr + packn * 3, vl); vfloat32m1_t _k54 = vle32_v_f32m1(kptr + packn * 4, vl); vfloat32m1_t _k55 = vle32_v_f32m1(kptr + packn * 5, vl); vfloat32m1_t _k56 = vle32_v_f32m1(kptr + packn * 6, vl); kptr += packn * 7; _sum0 = vfmacc_vf_f32m1(_sum0, r5[0], _k50, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r5[1], _k51, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r5[2], _k52, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r5[3], _k53, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r5[4], _k54, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r5[5], _k55, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r5[6], _k56, vl); vfloat32m1_t _k60 = vle32_v_f32m1(kptr, vl); vfloat32m1_t _k61 = vle32_v_f32m1(kptr + packn, vl); vfloat32m1_t _k62 = vle32_v_f32m1(kptr + packn * 2, vl); vfloat32m1_t _k63 = vle32_v_f32m1(kptr + packn * 3, vl); vfloat32m1_t _k64 = vle32_v_f32m1(kptr + packn * 4, vl); vfloat32m1_t _k65 = vle32_v_f32m1(kptr + packn * 5, vl); vfloat32m1_t _k66 = vle32_v_f32m1(kptr + packn * 6, vl); kptr -= packn * 42; _sum0 = vfmacc_vf_f32m1(_sum0, r6[0], _k60, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r6[1], _k61, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r6[2], _k62, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r6[3], _k63, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r6[4], _k64, vl); _sum0 = vfmacc_vf_f32m1(_sum0, 
r6[5], _k65, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r6[6], _k66, vl); vse32_v_f32m1(outptr0, _sum0, vl); outptr0 += packn; r0 += 2; r1 += 2; r2 += 2; r3 += 2; r4 += 2; r5 += 2; r6 += 2; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; r4 += tailstep; r5 += tailstep; r6 += tailstep; } } } }